Merge "Merge android-4.9.204(72e8598) into msm-4.9"
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 4a5a887..ee4cc72 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -314,6 +314,8 @@
 	- Hotpluggable memory support, how to use and current status.
 metag/
 	- directory with info about Linux on Meta architecture.
+mhi.txt
+	- Modem Host Interface
 mips/
 	- directory with info about Linux on MIPS architecture.
 misc-devices/
diff --git a/Documentation/devicetree/bindings/bus/mhi.txt b/Documentation/devicetree/bindings/bus/mhi.txt
new file mode 100644
index 0000000..dfbd846
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/mhi.txt
@@ -0,0 +1,320 @@
+MHI Host Interface
+
+MHI is used by the host to control and communicate with the modem over a
+high speed peripheral bus.
+
+==============
+Node Structure
+==============
+
+Main node properties:
+
+- mhi,max-channels
+  Usage: required
+  Value type: <u32>
+  Definition: Maximum number of channels supported by this controller
+
+- mhi,timeout
+  Usage: optional
+  Value type: <u32>
+  Definition: Maximum timeout in ms to wait for state and command completion
+
+- mhi,use-bb
+  Usage: optional
+  Value type: <bool>
+  Definition: Set to true if the PCIe controller does not have full access to
+	host DDR and a dedicated memory pool, such as CMA or a carveout
+	pool, is used. The pool must support atomic allocation.
+
+- mhi,buffer-len
+  Usage: optional
+  Value type: <u32>
+  Definition: MHI automatically pre-allocates buffers for some channels.
+	Sets the length of the buffer to allocate. If not set, the default
+	size MHI_MAX_MTU will be used.
+
+============================
+mhi channel node properties:
+============================
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: physical channel number
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: given name for the channel
+
+- mhi,num-elements
+  Usage: optional
+  Value type: <u32>
+  Definition: Number of elements the transfer ring supports
+
+- mhi,event-ring
+  Usage: required
+  Value type: <u32>
+  Definition: Event ring index associated with this channel
+
+- mhi,chan-dir
+  Usage: required
+  Value type: <u32>
+  Definition: Channel direction as defined by enum dma_data_direction
+	0 = Bidirectional data transfer
+	1 = UL data transfer
+	2 = DL data transfer
+	3 = No direction, not a regular data transfer channel
+
+- mhi,ee
+  Usage: required
+  Value type: <u32>
+  Definition: Channel execution environment (EE) mask as defined by enum
+	mhi_ch_ee_mask
+		BIT(0) = Channel supported in PBL EE
+		BIT(1) = Channel supported in SBL EE
+		BIT(2) = Channel supported in AMSS EE
+		BIT(3) = Channel supported in RDDM EE
+		BIT(4) = Channel supported in WFW EE
+		BIT(5) = Channel supported in PTHRU EE
+		BIT(6) = Channel supported in EDL EE
+
+- mhi,pollcfg
+  Usage: optional
+  Value type: <u32>
+  Definition: MHI poll configuration, valid only when burst mode is enabled
+	0 = Use default (device specific) polling configuration
+	For UL channels, the value specifies the timer to poll the MHI context
+	in milliseconds.
+	For DL channels, the value specifies the threshold to poll the MHI
+	context in multiples of eight ring elements.
+
+- mhi,data-type
+  Usage: required
+  Value type: <u32>
+  Definition: Data transfer type accepted as defined by enum MHI_XFER_TYPE
+	0 = accept cpu address for buffer
+	1 = accept skb
+	2 = accept scatterlist
+	3 = offload channel, does not accept any transfer type
+	4 = accept pre-mapped buffers
+	5 = rsc channel type, accept pre-mapped buffers
+
+- mhi,doorbell-mode
+  Usage: required
+  Value type: <u32>
+  Definition: Channel doorbell mode configuration as defined by enum
+	MHI_BRSTMODE
+	2 = burst mode disabled
+	3 = burst mode enabled
+
+- mhi,lpm-notify
+  Usage: optional
+  Value type: <bool>
+  Definition: The channel master requires low power mode entry and exit
+	notifications from the MHI bus master.
+
+- mhi,offload-chan
+  Usage: optional
+  Value type: <bool>
+  Definition: Client managed channel; the MHI host is only involved in setting
+	up the data path, not in the active data path.
+
+- mhi,db-mode-switch
+  Usage: optional
+  Value type: <bool>
+  Definition: Must switch to doorbell mode whenever MHI M0 state transition
+	happens.
+
+- mhi,auto-queue
+  Usage: optional
+  Value type: <bool>
+  Definition: The MHI bus driver will pre-allocate buffers for this channel
+	and queue them to hardware. If set, the client is not allowed to queue
+	buffers. Valid only for the downlink direction.
+
+- mhi,auto-start
+  Usage: optional
+  Value type: <bool>
+  Definition: The MHI host driver automatically starts channels once the mhi
+	device driver probe is complete. This should only be set true if the
+	initial handshake is initiated by the external modem.
+
+- mhi,wake-capable
+  Usage: optional
+  Value type: <bool>
+  Definition: Time sensitive data channel; the host should process all pending
+	data before system suspend.
+
+- mhi,chan-type
+  Usage: optional
+  Value type: <u32>
+  Definition: By default, chan-type is the same as the 'mhi,chan-dir' property
+	except for some special channels, where chan-type supplements the
+	channel direction.
+	3 = default no direction, or inbound coalesced channel
+
+==========================
+mhi event node properties:
+==========================
+
+- mhi,num-elements
+  Usage: required
+  Value type: <u32>
+  Definition: Number of elements the event ring supports
+
+- mhi,intmod
+  Usage: required
+  Value type: <u32>
+  Definition: interrupt moderation time in ms
+
+- mhi,msi
+  Usage: required
+  Value type: <u32>
+  Definition: MSI associated with this event ring
+
+- mhi,chan
+  Usage: optional
+  Value type: <u32>
+  Definition: Dedicated channel number, if it's a dedicated event ring
+
+- mhi,priority
+  Usage: required
+  Value type: <u32>
+  Definition: Event ring priority, set to 1 for now
+
+- mhi,brstmode
+  Usage: required
+  Value type: <u32>
+  Definition: Event doorbell mode configuration as defined by
+	enum MHI_BRSTMODE
+		2 = burst mode disabled
+		3 = burst mode enabled
+
+- mhi,data-type
+  Usage: optional
+  Value type: <u32>
+  Definition: Type of data this event ring will process as defined
+	by enum mhi_er_data_type
+		0 = process data packets (default)
+		1 = process mhi control packets
+
+- mhi,hw-ev
+  Usage: optional
+  Value type: <bool>
+  Definition: Event ring associated with hardware channels
+
+- mhi,client-manage
+  Usage: optional
+  Value type: <bool>
+  Definition: Client manages the event ring (used by napi_poll)
+
+- mhi,offload
+  Usage: optional
+  Value type: <bool>
+  Definition: Event ring associated with offload channel
+
+
+=========================
+Children node properties:
+=========================
+
+MHI drivers that require DT can add driver specific information as a child node.
+
+- mhi,chan
+  Usage: Required
+  Value type: <string>
+  Definition: Channel name
+
+========
+Example:
+========
+mhi_controller {
+	mhi,max-channels = <105>;
+
+	mhi_chan@0 {
+		reg = <0>;
+		label = "LOOPBACK";
+		mhi,num-elements = <64>;
+		mhi,event-ring = <2>;
+		mhi,chan-dir = <1>;
+		mhi,data-type = <0>;
+		mhi,doorbell-mode = <2>;
+		mhi,ee = <0x4>;
+	};
+
+	mhi_chan@1 {
+		reg = <1>;
+		label = "LOOPBACK";
+		mhi,num-elements = <64>;
+		mhi,event-ring = <2>;
+		mhi,chan-dir = <2>;
+		mhi,data-type = <0>;
+		mhi,doorbell-mode = <2>;
+		mhi,ee = <0x4>;
+	};
+
+	mhi_event@0 {
+		mhi,num-elements = <32>;
+		mhi,intmod = <1>;
+		mhi,msi = <1>;
+		mhi,chan = <0>;
+		mhi,priority = <1>;
+		mhi,brstmode = <2>;
+		mhi,data-type = <1>;
+	};
+
+	mhi_event@1 {
+		mhi,num-elements = <256>;
+		mhi,intmod = <1>;
+		mhi,msi = <2>;
+		mhi,chan = <0>;
+		mhi,priority = <1>;
+		mhi,brstmode = <2>;
+	};
+
+	mhi,timeout = <500>;
+
+	children_node {
+		mhi,chan = "LOOPBACK";
+		<driver specific properties>
+	};
+};
+
+================
+Children Devices
+================
+
+MHI netdev properties
+
+- mhi,chan
+  Usage: required
+  Value type: <string>
+  Definition: Channel name the MHI netdev supports
+
+- mhi,mru
+  Usage: required
+  Value type: <u32>
+  Definition: Largest packet size the interface can receive, in bytes.
+
+- mhi,interface-name
+  Usage: optional
+  Value type: <string>
+  Definition: Interface name to be given so clients can identify it
+
+- aliases
+  Usage: required
+  Value type: <string>
+  Definition: The mhi net_device should have a numbered alias in the aliases
+	node, of the form mhi_netdevN, N = 0, 1..n, one per network interface.
+
+========
+Example:
+========
+
+aliases {
+	mhi_netdev0 = &mhi_netdev_0;
+};
+
+mhi_netdev_0: mhi_rmnet@0 {
+	mhi,chan = "IP_HW0";
+	mhi,interface-name = "rmnet_mhi";
+	mhi,mru = <0x4000>;
+};
diff --git a/Documentation/mhi.txt b/Documentation/mhi.txt
new file mode 100644
index 0000000..9287899
--- /dev/null
+++ b/Documentation/mhi.txt
@@ -0,0 +1,276 @@
+Overview of Linux kernel MHI support
+====================================
+
+Modem-Host Interface (MHI)
+=========================
+MHI is used by the host to control and communicate with the modem over a high
+speed peripheral bus. Even though MHI can be easily adapted to any peripheral
+bus, it is primarily used with PCIe based devices. The host has one or more
+PCIe root ports connected to the modem device. The host has limited access to
+the device memory space, including register configuration and control of the
+device operation. Data transfers are invoked from the device.
+
+All data structures used by MHI are in the host system memory, which the
+device accesses over the PCIe interface. MHI data structures and data buffers
+in the host system memory regions are mapped for the device.
+
+Memory spaces
+-------------
+PCIe Configurations : Used for enumeration and resource management, such as
+interrupts and base addresses.  This is done by the MHI controller driver.
+
+MMIO
+----
+MHI MMIO : Memory mapped IO consists of a set of registers in the device
+hardware, which are mapped to the host memory space through a PCIe base
+address register (BAR).
+
+MHI control registers : Access to MHI configuration registers
+(struct mhi_controller.regs).
+
+MHI BHI register: Boot host interface registers (struct mhi_controller.bhi) used
+for firmware download before MHI initialization.
+
+Channel db array : Doorbell registers (struct mhi_chan.tre_ring.db_addr) used
+by the host to notify the device there is new work to do.
+
+Event db array : Associated with the event context array
+(struct mhi_event.ring.db_addr), which the host uses to notify the device that
+free events are available.
+
+Data structures
+---------------
+Host memory : Directly accessed by the host to manage the MHI data structures
+and buffers. The device accesses the host memory over the PCIe interface.
+
+Channel context array : All channel configurations are organized in channel
+context data array.
+
+struct __packed mhi_chan_ctxt;
+struct mhi_ctxt.chan_ctxt;
+
+Transfer rings : Used by host to schedule work items for a channel and organized
+as a circular queue of transfer descriptors (TD).
+
+struct __packed mhi_tre;
+struct mhi_chan.tre_ring;
+
+Event context array : All event configurations are organized in event context
+data array.
+
+struct mhi_ctxt.er_ctxt;
+struct __packed mhi_event_ctxt;
+
+Event rings: Used by the device to send completion and state transition
+messages to the host.
+
+struct mhi_event.ring;
+struct __packed mhi_tre;
+
+Command context array: All command configurations are organized in command
+context data array.
+
+struct __packed mhi_cmd_ctxt;
+struct mhi_ctxt.cmd_ctxt;
+
+Command rings: Used by the host to send MHI commands to the device.
+
+struct __packed mhi_tre;
+struct mhi_cmd.ring;
+
+Transfer rings
+--------------
+MHI channels are logical, unidirectional data pipes between host and device.
+Each channel is associated with a single transfer ring.  The data direction
+can be either inbound (device to host) or outbound (host to device).  Transfer
+descriptors are managed by using transfer rings, which are defined for each
+channel between device and host and reside in the host memory.
+
+Transfer ring Pointer:	  	Transfer Ring Array
+[Read Pointer (RP)] ----------->[Ring Element] } TD
+[Write Pointer (WP)]-		[Ring Element]
+                     -		[Ring Element]
+		      --------->[Ring Element]
+				[Ring Element]
+
+1. Host allocates memory for the transfer ring
+2. Host sets base, read pointer, write pointer in corresponding channel context
+3. Ring is considered empty when RP == WP
+4. Ring is considered full when WP + 1 == RP
+5. RP indicates the next element to be serviced by the device
+6. When the host has a new buffer to send, it updates the ring element with
+   the buffer information
+7. Host increments the WP to the next element
+8. Host rings the associated channel DB.
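+
+The empty/full rules above translate directly into code. A minimal sketch,
+assuming wrapping indices into a ring of 'size' elements (helper names are
+illustrative, not the driver's actual API):
+
+	/* Ring is empty when the device has caught up with the host. */
+	static bool ring_empty(u32 rp, u32 wp)
+	{
+		return rp == wp;
+	}
+
+	/* Ring is full when one more write would collide with RP. */
+	static bool ring_full(u32 rp, u32 wp, u32 size)
+	{
+		return ((wp + 1) % size) == rp;
+	}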
+
+Event rings
+-----------
+Events from the device to the host are organized in event rings and defined in
+event descriptors.  Event rings are arrays of EDs that reside in the host
+memory.
+
+Event ring Pointer:	  	Event Ring Array
+[Read Pointer (RP)] ----------->[Ring Element] } ED
+[Write Pointer (WP)]-		[Ring Element]
+                     -		[Ring Element]
+		      --------->[Ring Element]
+				[Ring Element]
+
+1. Host allocates memory for the event ring
+2. Host sets base, read pointer, write pointer in corresponding channel context
+3. Both host and device have a local copy of RP, WP
+4. Ring is considered empty (no events to service) when WP + 1 == RP
+5. Ring is full of events when RP == WP
+6. RP - 1 = last event the device programmed
+7. When there is a new event the device needs to send, the device updates the
+   ED pointed to by RP
+8. Device increments RP to the next element
+9. Device triggers an interrupt
+
+Example Operation for data transfer:
+
+1. Host prepares a TD with buffer information
+2. Host increments Chan[id].ctxt.WP
+3. Host rings the channel DB register
+4. Device wakes up and processes the TD
+5. Device generates a completion event for that TD by updating the ED
+6. Device increments Event[id].ctxt.RP
+7. Device triggers MSI to wake the host
+8. Host wakes up and checks the event ring for the completion event
+9. Host updates Event[id].ctxt.WP to indicate the completion event has been
+   processed.
+
+Time sync
+---------
+To synchronize two applications between the host and the external modem, MHI
+provides native support for reading the external modem's free running timer
+value in a fast, reliable way. MHI clients do not need to create client
+specific methods to get the modem time.
+
+When a client requests the modem time, the MHI host will automatically capture
+the host time at that moment so clients are able to do accurate drift
+adjustment.
+
+Example:
+
+Client request time @ time T1
+
+Host Time: Tx
+Modem Time: Ty
+
+Client request time @ time T2
+Host Time: Txx
+Modem Time: Tyy
+
+Then the drift satisfies:
+Tyy - Ty + <drift> == Txx - Tx
+
+Clients are free to implement their own drift algorithms; what the MHI host
+provides is a way to accurately correlate host time with external modem time.
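+
+A minimal sketch of the drift computation implied by the example above
+(names are illustrative, not an MHI API):
+
+	/* Sample pairs captured at T1 (tx, ty) and T2 (txx, tyy). */
+	static s64 modem_drift(u64 tx, u64 ty, u64 txx, u64 tyy)
+	{
+		/* From Tyy - Ty + drift == Txx - Tx */
+		return (s64)(txx - tx) - (s64)(tyy - ty);
+	}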
+
+To avoid link level latencies, the controller must support the capability to
+disable any link level latency.
+
+During time capture, the host will:
+	1. Capture host time
+	2. Trigger doorbell to capture modem time
+
+It is important that the time between Step 1 and Step 2 is as deterministic as
+possible. Therefore, the MHI host will:
+	1. Disable any MHI related low power modes.
+	2. Disable preemption
+	3. Request the bus master to disable any link level latencies. The
+	controller should disable all low power modes such as L0s, L1, L1ss.
+
+MHI States
+----------
+
+enum MHI_STATE {
+MHI_STATE_RESET : MHI is in reset state, POR state. Host is not allowed to
+		  access device MMIO register space.
+MHI_STATE_READY : Device is ready for initialization. Host can start MHI
+		  initialization by programming MMIO
+MHI_STATE_M0 : MHI is in fully active state, data transfer is active
+MHI_STATE_M1 : Device in a suspended state
+MHI_STATE_M2 : MHI in low power mode, device may enter lower power mode.
+MHI_STATE_M3 : Both host and device in suspended state.  PCIe link is not
+	       accessible to device.
+}
+
+MHI Initialization
+------------------
+
+1. After system boots, the device is enumerated over the PCIe interface
+2. Host allocates MHI context for the event, channel and command arrays
+3. Host initializes the context arrays and prepares interrupts
+4. Host waits until the device enters the READY state
+5. Host programs the MHI MMIO registers and sets the device into MHI_M0 state
+6. Host waits for the device to enter M0 state
+
+Linux Software Architecture
+===========================
+
+MHI Controller
+--------------
+The MHI controller is also the MHI bus master, in charge of managing the
+physical link between host and device. It is not involved in the actual data
+transfer. Currently only PCIe based buses are supported; support for other bus
+types can be added later.
+
+Roles:
+1. Turn on PCIe bus and configure the link
+2. Configure MSI, SMMU, and IOMEM
+3. Allocate struct mhi_controller and register with MHI bus framework
+4. Initiate power on and shutdown sequence
+5. Initiate suspend and resume
+
+Usage
+-----
+
+1. Allocate control data structure by calling mhi_alloc_controller()
+2. Initialize mhi_controller with all the known information such as:
+   - Device Topology
+   - IOMMU window
+   - IOMEM mapping
+   - Device to use for memory allocation, and of_node with DT configuration
+   - Configure asynchronous callback functions
+3. Register MHI controller with MHI bus framework by calling
+   of_register_mhi_controller()
+
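+A minimal registration sketch following the steps above. Only
+mhi_alloc_controller() and of_register_mhi_controller() are named by this
+document; the allocation argument, field contents, and error handling are
+assumptions for illustration:
+
+	struct mhi_controller *mhi_cntrl;
+	int ret;
+
+	/* Step 1: allocate the control structure (extra size assumed 0). */
+	mhi_cntrl = mhi_alloc_controller(0);
+	if (!mhi_cntrl)
+		return -ENOMEM;
+
+	/* Step 2: fill in topology, IOMMU window, IOMEM, device/of_node and
+	 * asynchronous callbacks before registering (fields assumed).
+	 */
+
+	/* Step 3: register with the MHI bus framework. */
+	ret = of_register_mhi_controller(mhi_cntrl);
+	if (ret)
+		return ret;
+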
+After successful registration, the controller can initiate any of these power
+sequences:
+
+1. Power up sequence
+   - mhi_prepare_for_power_up()
+   - mhi_async_power_up()
+   - mhi_sync_power_up()
+2. Power down sequence
+   - mhi_power_down()
+   - mhi_unprepare_after_power_down()
+3. Initiate suspend
+   - mhi_pm_suspend()
+4. Initiate resume
+   - mhi_pm_resume()
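+
+A minimal power sequencing sketch using the calls above (the graceful-shutdown
+flag on mhi_power_down() is an assumption; only the function names are given
+by this document):
+
+	/* Bring the device up synchronously. */
+	ret = mhi_prepare_for_power_up(mhi_cntrl);
+	if (!ret)
+		ret = mhi_sync_power_up(mhi_cntrl);
+
+	/* ... later, tear it back down. */
+	mhi_power_down(mhi_cntrl, true);
+	mhi_unprepare_after_power_down(mhi_cntrl);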
+
+MHI Devices
+-----------
+A logical device that binds to a maximum of two physical MHI channels. Once
+MHI is in the powered on state, each channel supported by the controller will
+be allocated as an mhi_device.
+
+Each supported device is enumerated under
+/sys/bus/mhi/devices/
+
+struct mhi_device;
+
+MHI Driver
+----------
+Each MHI driver can bind to one or more MHI devices. The MHI host driver binds
+an mhi_device to an mhi_driver.
+
+All registered drivers are visible under
+/sys/bus/mhi/drivers/
+
+struct mhi_driver;
+
+Usage
+-----
+
+1. Register the driver using mhi_driver_register
+2. Before sending data, prepare the device for transfer by calling
+   mhi_prepare_for_transfer
+3. Initiate data transfer by calling mhi_queue_transfer
+4. When finished, call mhi_unprepare_from_transfer to end the data transfer
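+
+A minimal client driver sketch following the steps above. Only
+mhi_driver_register, mhi_prepare_for_transfer, mhi_queue_transfer and
+mhi_unprepare_from_transfer are named by this document; the structure fields
+and callback signatures are assumptions for illustration:
+
+	static int example_probe(struct mhi_device *mhi_dev,
+				 const struct mhi_device_id *id)
+	{
+		/* Step 2: move the channels to a transfer-ready state.
+		 * Data can then be queued with mhi_queue_transfer().
+		 */
+		return mhi_prepare_for_transfer(mhi_dev);
+	}
+
+	static const struct mhi_device_id example_match[] = {
+		{ .chan = "LOOPBACK" },	/* channel label from DT (field assumed) */
+		{},
+	};
+
+	static struct mhi_driver example_driver = {
+		.probe = example_probe,
+		.id_table = example_match,
+		.driver = {
+			.name = "example_mhi_client",	/* hypothetical */
+		},
+	};
+
+	static int __init example_init(void)
+	{
+		return mhi_driver_register(&example_driver);	/* step 1 */
+	}
+	module_init(example_init);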
diff --git a/arch/arm/boot/dts/qcom/sa415m-ccard.dtsi b/arch/arm/boot/dts/qcom/sa415m-ccard.dtsi
index 92c75dd..00564c7 100644
--- a/arch/arm/boot/dts/qcom/sa415m-ccard.dtsi
+++ b/arch/arm/boot/dts/qcom/sa415m-ccard.dtsi
@@ -51,6 +51,8 @@
 &i2c_4 {
 	asm330@6a {
 		reg = <0x6b>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&sensor_enable_default>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/qcom/sa415m-cdp.dts b/arch/arm/boot/dts/qcom/sa415m-cdp.dts
index b021bf0..b9e7d49 100644
--- a/arch/arm/boot/dts/qcom/sa415m-cdp.dts
+++ b/arch/arm/boot/dts/qcom/sa415m-cdp.dts
@@ -12,7 +12,7 @@
 
 /dts-v1/;
 
-#include "sa415m-cdp.dtsi"
+#include "sdxpoorwills-cdp.dtsi"
 #include "sdxpoorwills-display.dtsi"
 #include "qpic-panel-ili-hvga.dtsi"
 
diff --git a/arch/arm/boot/dts/qcom/sa415m-cdp.dtsi b/arch/arm/boot/dts/qcom/sa415m-cdp.dtsi
deleted file mode 100644
index edc08cc..0000000
--- a/arch/arm/boot/dts/qcom/sa415m-cdp.dtsi
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include "sdxpoorwills-cdp.dtsi"
-
-&vbus_detect {
-	status = "okay";
-};
-
-&usb {
-	status = "okay";
-	qcom,connector-type-uAB;
-	extcon = <0>, <0>, <0>, <&vbus_detect>;
-	dwc3@a600000 {
-		normal-eps-in-gsi-mode;
-	};
-};
-
-&smb138x {
-	status = "disabled";
-};
diff --git a/arch/arm/boot/dts/qcom/sa415m-mtp-256.dts b/arch/arm/boot/dts/qcom/sa415m-mtp-256.dts
index 39ed5c6..913b3a8 100644
--- a/arch/arm/boot/dts/qcom/sa415m-mtp-256.dts
+++ b/arch/arm/boot/dts/qcom/sa415m-mtp-256.dts
@@ -32,3 +32,17 @@
 &smb138x {
 	status = "disabled";
 };
+
+&thermal_zones {
+	mdm-core-step {
+		trips {
+			mdm-step-trip-0 {
+				temperature = <105000>;
+			};
+
+			mdm-step-trip-1 {
+				temperature = <115000>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/sa415m-ttp.dtsi b/arch/arm/boot/dts/qcom/sa415m-ttp.dtsi
index 96167a6..29091d3 100644
--- a/arch/arm/boot/dts/qcom/sa415m-ttp.dtsi
+++ b/arch/arm/boot/dts/qcom/sa415m-ttp.dtsi
@@ -99,6 +99,20 @@
 	status = "disabled";
 };
 
+&thermal_zones {
+	mdm-core-step {
+		trips {
+			mdm-step-trip-0 {
+				temperature = <105000>;
+			};
+
+			mdm-step-trip-1 {
+				temperature = <115000>;
+			};
+		};
+	};
+};
+
 &i2c_4 {
 	status = "okay";
 
diff --git a/arch/arm/boot/dts/qcom/sa415m-v2-cdp.dts b/arch/arm/boot/dts/qcom/sa415m-v2-cdp.dts
index 5542ab4..c8b7891 100644
--- a/arch/arm/boot/dts/qcom/sa415m-v2-cdp.dts
+++ b/arch/arm/boot/dts/qcom/sa415m-v2-cdp.dts
@@ -12,7 +12,7 @@
 
 /dts-v1/;
 
-#include "sa415m-cdp.dtsi"
+#include "sdxpoorwills-cdp.dtsi"
 #include "sdxpoorwills-v2.dtsi"
 #include "sdxpoorwills-display.dtsi"
 #include "qpic-panel-ili-hvga.dtsi"
diff --git a/arch/arm/boot/dts/qcom/sa415m-v2-mtp.dts b/arch/arm/boot/dts/qcom/sa415m-v2-mtp.dts
index fe59e05..4e0aefe 100644
--- a/arch/arm/boot/dts/qcom/sa415m-v2-mtp.dts
+++ b/arch/arm/boot/dts/qcom/sa415m-v2-mtp.dts
@@ -37,3 +37,17 @@
 &smb138x {
 	status = "disabled";
 };
+
+&thermal_zones {
+	mdm-core-step {
+		trips {
+			mdm-step-trip-0 {
+				temperature = <105000>;
+			};
+
+			mdm-step-trip-1 {
+				temperature = <115000>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
index f441b3f..033f8a3 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
@@ -1751,6 +1751,20 @@
 				output-high;
 			};
 		};
+
+		sensor_enable_default: sensor_enable_default {
+			mux {
+				pins = "gpio90";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio90";
+				bias-pull-up;
+				drive-strength = <16>;
+				output-high;
+			};
+		};
 	};
 };
 
diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig
index 458d60a..4ad91f3 100755
--- a/arch/arm/configs/msm8909-perf_defconfig
+++ b/arch/arm/configs/msm8909-perf_defconfig
@@ -400,6 +400,7 @@
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_QPNP_REVID=y
 CONFIG_USB_BAM=y
+CONFIG_MSM_RMNET_BAM=y
 CONFIG_MSM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
diff --git a/arch/arm/configs/msm8909_defconfig b/arch/arm/configs/msm8909_defconfig
index 38d48ac..61e623d 100644
--- a/arch/arm/configs/msm8909_defconfig
+++ b/arch/arm/configs/msm8909_defconfig
@@ -442,6 +442,7 @@
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_QPNP_REVID=y
 CONFIG_USB_BAM=y
+CONFIG_MSM_RMNET_BAM=y
 CONFIG_MSM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
diff --git a/arch/arm/configs/sa415m-perf_defconfig b/arch/arm/configs/sa415m-perf_defconfig
index ce24369..ff8219a 100644
--- a/arch/arm/configs/sa415m-perf_defconfig
+++ b/arch/arm/configs/sa415m-perf_defconfig
@@ -185,6 +185,9 @@
 CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=12
+CONFIG_MHI_BUS=y
+CONFIG_MHI_NETDEV=y
+CONFIG_MHI_UCI=y
 CONFIG_MTD=y
 CONFIG_MTD_TESTS=m
 CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/arm/configs/sa415m_defconfig b/arch/arm/configs/sa415m_defconfig
index b28dd77..72f4ea3 100644
--- a/arch/arm/configs/sa415m_defconfig
+++ b/arch/arm/configs/sa415m_defconfig
@@ -185,6 +185,10 @@
 CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=12
+CONFIG_MHI_BUS=y
+CONFIG_MHI_DEBUG=y
+CONFIG_MHI_NETDEV=y
+CONFIG_MHI_UCI=y
 CONFIG_MTD=y
 CONFIG_MTD_TESTS=m
 CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/arm/configs/sdm429-bg-perf_defconfig b/arch/arm/configs/sdm429-bg-perf_defconfig
index e3164f6..7bb5cf9 100644
--- a/arch/arm/configs/sdm429-bg-perf_defconfig
+++ b/arch/arm/configs/sdm429-bg-perf_defconfig
@@ -137,13 +137,9 @@
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_LOG=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
-CONFIG_NETFILTER_XT_TARGET_TEE=y
 CONFIG_NETFILTER_XT_TARGET_TPROXY=y
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@@ -153,11 +149,9 @@
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
 CONFIG_NETFILTER_XT_MATCH_DSCP=y
-CONFIG_NETFILTER_XT_MATCH_ESP=y
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_HELPER=y
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
-# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
 CONFIG_NETFILTER_XT_MATCH_LENGTH=y
 CONFIG_NETFILTER_XT_MATCH_LIMIT=y
 CONFIG_NETFILTER_XT_MATCH_MAC=y
@@ -198,13 +192,6 @@
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
-CONFIG_BRIDGE_NF_EBTABLES=y
-CONFIG_BRIDGE_EBT_BROUTE=y
-CONFIG_L2TP=y
-CONFIG_L2TP_DEBUGFS=y
-CONFIG_L2TP_V3=y
-CONFIG_L2TP_IP=y
-CONFIG_L2TP_ETH=y
 CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
@@ -213,6 +200,7 @@
 CONFIG_NET_CLS_U32=y
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
 CONFIG_NET_EMATCH_NBYTE=y
@@ -236,6 +224,7 @@
 CONFIG_IPC_ROUTER_SECURITY=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
 CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_REGMAP_SND_DIGCDC=y
 CONFIG_DMA_CMA=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=y
@@ -245,12 +234,9 @@
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
 CONFIG_QPNP_MISC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
-CONFIG_DM_REQ_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -265,6 +251,7 @@
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
+CONFIG_PHYLIB=y
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
@@ -272,12 +259,10 @@
 CONFIG_PPP_MPPE=y
 CONFIG_PPP_MULTILINK=y
 CONFIG_PPPOE=y
-CONFIG_PPPOL2TP=y
 CONFIG_PPPOLAC=y
 CONFIG_PPPOPNS=y
 CONFIG_PPP_ASYNC=y
 CONFIG_PPP_SYNC_TTY=y
-CONFIG_USB_USBNET=y
 # CONFIG_WLAN_VENDOR_ADMTEK is not set
 # CONFIG_WLAN_VENDOR_ATH is not set
 # CONFIG_WLAN_VENDOR_ATMEL is not set
@@ -352,22 +337,18 @@
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
 CONFIG_QTI_VIRTUAL_SENSOR=y
 CONFIG_QTI_QMI_COOLING_DEVICE=y
 CONFIG_REGULATOR_COOLING_DEVICE=y
-CONFIG_QTI_BCL_PMIC5=y
-CONFIG_QTI_BCL_SOC_DRIVER=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_CPR=y
-CONFIG_REGULATOR_CPR4_APSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
 CONFIG_REGULATOR_MEM_ACC=y
 CONFIG_REGULATOR_MSM_GFX_LDO=y
-CONFIG_REGULATOR_QPNP=y
 CONFIG_REGULATOR_RPM_SMD=y
 CONFIG_REGULATOR_SPM=y
 CONFIG_REGULATOR_STUB=y
@@ -375,73 +356,33 @@
 CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
-CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
-CONFIG_MSM_CAMERA_SENSOR=y
-CONFIG_MSM_CPP=y
-CONFIG_MSM_CCI=y
-CONFIG_MSM_CSI20_HEADER=y
-CONFIG_MSM_CSI22_HEADER=y
-CONFIG_MSM_CSI30_HEADER=y
-CONFIG_MSM_CSI31_HEADER=y
-CONFIG_MSM_CSIPHY=y
-CONFIG_MSM_CSID=y
-CONFIG_MSM_EEPROM=y
-CONFIG_MSM_ISPIF_V2=y
-CONFIG_IMX134=y
-CONFIG_IMX132=y
-CONFIG_OV9724=y
-CONFIG_OV5648=y
-CONFIG_GC0339=y
-CONFIG_OV8825=y
-CONFIG_OV8865=y
-CONFIG_s5k4e1=y
-CONFIG_OV12830=y
-CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
-CONFIG_MSMB_JPEG=y
-CONFIG_MSM_FD=y
 CONFIG_MSM_VIDC_3X_V4L2=y
 CONFIG_MSM_VIDC_3X_GOVERNORS=y
-CONFIG_RADIO_IRIS=y
-CONFIG_RADIO_IRIS_TRANSPORT=y
+CONFIG_ADSP_SHMEM=y
 CONFIG_QCOM_KGSL=y
 CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
 CONFIG_FB_MSM=y
 CONFIG_FB_MSM_MDSS=y
 CONFIG_FB_MSM_MDSS_WRITEBACK=y
 CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
 CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_SOC=y
-CONFIG_SND_SOC_AW8896=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
 CONFIG_HID_ELECOM=y
 CONFIG_HID_MAGICMOUSE=y
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ACM=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_EHSET_TEST_FIXTURE=y
-CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
@@ -451,8 +392,6 @@
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
 CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_RNDIS=y
-CONFIG_USB_CONFIGFS_RMNET_BAM=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_F_MTP=y
@@ -465,7 +404,6 @@
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_CCID=y
-CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
 # CONFIG_PWRSEQ_EMMC is not set
@@ -478,7 +416,8 @@
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_MMC_SDHCI_MSM_ICE=y
-CONFIG_MMC_CQ_HCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_QPNP_HAPTICS=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
@@ -495,12 +434,8 @@
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_ION=y
 CONFIG_ION_MSM=y
-CONFIG_IPA=y
-CONFIG_RMNET_IPA=y
-CONFIG_RNDIS_IPA=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
-CONFIG_QPNP_COINCELL=y
 CONFIG_QPNP_REVID=y
 CONFIG_USB_BAM=y
 CONFIG_MSM_RMNET_BAM=y
diff --git a/arch/arm/configs/sdm429-bg_defconfig b/arch/arm/configs/sdm429-bg_defconfig
index dfcbaab..f0aeb94 100644
--- a/arch/arm/configs/sdm429-bg_defconfig
+++ b/arch/arm/configs/sdm429-bg_defconfig
@@ -141,13 +141,9 @@
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_LOG=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
-CONFIG_NETFILTER_XT_TARGET_TEE=y
 CONFIG_NETFILTER_XT_TARGET_TPROXY=y
 CONFIG_NETFILTER_XT_TARGET_TRACE=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@@ -158,11 +154,9 @@
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
 CONFIG_NETFILTER_XT_MATCH_DSCP=y
-CONFIG_NETFILTER_XT_MATCH_ESP=y
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_HELPER=y
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
-# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
 CONFIG_NETFILTER_XT_MATCH_LENGTH=y
 CONFIG_NETFILTER_XT_MATCH_LIMIT=y
 CONFIG_NETFILTER_XT_MATCH_MAC=y
@@ -173,6 +167,7 @@
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
@@ -204,13 +199,6 @@
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 CONFIG_IP6_NF_RAW=y
-CONFIG_BRIDGE_NF_EBTABLES=y
-CONFIG_BRIDGE_EBT_BROUTE=y
-CONFIG_L2TP=y
-CONFIG_L2TP_DEBUGFS=y
-CONFIG_L2TP_V3=y
-CONFIG_L2TP_IP=y
-CONFIG_L2TP_ETH=y
 CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_HTB=y
@@ -219,6 +207,7 @@
 CONFIG_NET_CLS_U32=y
 CONFIG_CLS_U32_MARK=y
 CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
 CONFIG_NET_EMATCH_NBYTE=y
@@ -243,6 +232,7 @@
 CONFIG_IPC_ROUTER_SECURITY=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
 CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_REGMAP_SND_DIGCDC=y
 CONFIG_DMA_CMA=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=y
@@ -252,12 +242,9 @@
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
 CONFIG_QPNP_MISC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=y
-CONFIG_DM_REQ_CRYPT=y
 CONFIG_DM_UEVENT=y
 CONFIG_DM_VERITY=y
 CONFIG_DM_VERITY_FEC=y
@@ -272,6 +259,7 @@
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SYNOPSYS is not set
+CONFIG_PHYLIB=y
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
@@ -279,12 +267,10 @@
 CONFIG_PPP_MPPE=y
 CONFIG_PPP_MULTILINK=y
 CONFIG_PPPOE=y
-CONFIG_PPPOL2TP=y
 CONFIG_PPPOLAC=y
 CONFIG_PPPOPNS=y
 CONFIG_PPP_ASYNC=y
 CONFIG_PPP_SYNC_TTY=y
-CONFIG_USB_USBNET=y
 # CONFIG_WLAN_VENDOR_ADMTEK is not set
 # CONFIG_WLAN_VENDOR_ATH is not set
 # CONFIG_WLAN_VENDOR_ATMEL is not set
@@ -361,22 +347,18 @@
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
 CONFIG_QTI_VIRTUAL_SENSOR=y
 CONFIG_QTI_QMI_COOLING_DEVICE=y
 CONFIG_REGULATOR_COOLING_DEVICE=y
-CONFIG_QTI_BCL_PMIC5=y
-CONFIG_QTI_BCL_SOC_DRIVER=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_PROXY_CONSUMER=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_CPR=y
-CONFIG_REGULATOR_CPR4_APSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
 CONFIG_REGULATOR_MEM_ACC=y
 CONFIG_REGULATOR_MSM_GFX_LDO=y
-CONFIG_REGULATOR_QPNP=y
 CONFIG_REGULATOR_RPM_SMD=y
 CONFIG_REGULATOR_SPM=y
 CONFIG_REGULATOR_STUB=y
@@ -384,73 +366,33 @@
 CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_MSM_CAMERA=y
-CONFIG_MSM_CAMERA_DEBUG=y
-CONFIG_MSMB_CAMERA=y
-CONFIG_MSMB_CAMERA_DEBUG=y
-CONFIG_MSM_CAMERA_SENSOR=y
-CONFIG_MSM_CPP=y
-CONFIG_MSM_CCI=y
-CONFIG_MSM_CSI20_HEADER=y
-CONFIG_MSM_CSI22_HEADER=y
-CONFIG_MSM_CSI30_HEADER=y
-CONFIG_MSM_CSI31_HEADER=y
-CONFIG_MSM_CSIPHY=y
-CONFIG_MSM_CSID=y
-CONFIG_MSM_EEPROM=y
-CONFIG_MSM_ISPIF_V2=y
-CONFIG_IMX134=y
-CONFIG_IMX132=y
-CONFIG_OV9724=y
-CONFIG_OV5648=y
-CONFIG_GC0339=y
-CONFIG_OV8825=y
-CONFIG_OV8865=y
-CONFIG_s5k4e1=y
-CONFIG_OV12830=y
-CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
-CONFIG_MSMB_JPEG=y
-CONFIG_MSM_FD=y
 CONFIG_MSM_VIDC_3X_V4L2=y
 CONFIG_MSM_VIDC_3X_GOVERNORS=y
-CONFIG_RADIO_IRIS=y
-CONFIG_RADIO_IRIS_TRANSPORT=y
+CONFIG_ADSP_SHMEM=y
 CONFIG_QCOM_KGSL=y
 CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
 CONFIG_FB_MSM=y
 CONFIG_FB_MSM_MDSS=y
 CONFIG_FB_MSM_MDSS_WRITEBACK=y
 CONFIG_FB_MSM_MDSS_DSI_CTRL_STATUS=y
 CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_SOC=y
-CONFIG_SND_SOC_AW8896=y
 CONFIG_UHID=y
 CONFIG_HID_APPLE=y
 CONFIG_HID_ELECOM=y
 CONFIG_HID_MAGICMOUSE=y
 CONFIG_HID_MICROSOFT=y
 CONFIG_HID_MULTITOUCH=y
-CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ACM=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_EHSET_TEST_FIXTURE=y
-CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_GADGET_DEBUG_FILES=y
@@ -460,9 +402,6 @@
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_SERIAL=y
 CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_QCRNDIS=y
-CONFIG_USB_CONFIGFS_RNDIS=y
-CONFIG_USB_CONFIGFS_RMNET_BAM=y
 CONFIG_USB_CONFIGFS_MASS_STORAGE=y
 CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_F_MTP=y
@@ -475,7 +414,6 @@
 CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_USB_CONFIGFS_F_CDEV=y
 CONFIG_USB_CONFIGFS_F_CCID=y
-CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
 # CONFIG_PWRSEQ_EMMC is not set
@@ -489,7 +427,8 @@
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_MMC_SDHCI_MSM_ICE=y
-CONFIG_MMC_CQ_HCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_QPNP_HAPTICS=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
@@ -506,12 +445,8 @@
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_ION=y
 CONFIG_ION_MSM=y
-CONFIG_IPA=y
-CONFIG_RMNET_IPA=y
-CONFIG_RNDIS_IPA=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
-CONFIG_QPNP_COINCELL=y
 CONFIG_QPNP_REVID=y
 CONFIG_USB_BAM=y
 CONFIG_MSM_RMNET_BAM=y
@@ -530,6 +465,7 @@
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_CORE_HANG_DETECT=y
 CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QPNP_PBS=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
 CONFIG_MSM_DEBUG_LAR_UNLOCK=y
 CONFIG_MSM_RPM_SMD=y
@@ -551,7 +487,6 @@
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_ICNSS=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_AVTIMER=y
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d1df9cc..b0e8175 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -385,6 +385,15 @@
 #define writesw(p,d,l)		__raw_writesw(p,d,l)
 #define writesl(p,d,l)		__raw_writesl(p,d,l)
 
+#define readb_no_log(c) \
+		({ u8  __v = readb_relaxed_no_log(c); __iormb(); __v; })
+#define readw_no_log(c) \
+		({ u16 __v = readw_relaxed_no_log(c); __iormb(); __v; })
+#define readl_no_log(c) \
+		({ u32 __v = readl_relaxed_no_log(c); __iormb(); __v; })
+#define readq_no_log(c) \
+		({ u64 __v = readq_relaxed_no_log(c); __iormb(); __v; })
+
 #ifndef __ARMBE__
 static inline void memset_io(volatile void __iomem *dst, unsigned c,
 	size_t count)
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index c925cc0..eed610c 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -364,8 +364,7 @@
 	msm8953-iot-mtp-overlay.dtbo \
 	msm8953-ext-codec-mtp-overlay.dtbo \
 	msm8953-ext-codec-rcm-overlay.dtbo \
-	msm8953-cdp-1200p-overlay.dtbo \
-	msm8953-no-pmi-overlay.dtbo
+	msm8953-cdp-1200p-overlay.dtbo
 
 dtbo-$(CONFIG_ARCH_SDM450) += msm8953-mtp-overlay.dtbo \
 	msm8953-cdp-overlay.dtbo \
@@ -374,8 +373,7 @@
 	msm8953-iot-mtp-overlay.dtbo \
 	sdm450-cdp-s2-overlay.dtbo \
 	sdm450-mtp-s3-overlay.dtbo \
-	sdm450-qrd-sku4-overlay.dtbo\
-	sdm450-no-pmi-mtp-overlay.dtbo
+	sdm450-qrd-sku4-overlay.dtbo
 
 dtbo-$(CONFIG_ARCH_SDM632) += sdm632-rumi-overlay.dtbo \
 	sdm450-cdp-s2-overlay.dtbo \
@@ -475,9 +473,6 @@
 msm8953-ext-codec-rcm-overlay.dtbo-base := msm8953.dtb \
 	apq8053.dtb
 msm8953-cdp-1200p-overlay.dtbo-base := msm8953.dtb
-
-msm8953-no-pmi-overlay.dtbo-base := msm8953-no-pmi.dtb
-
 sdm450-cdp-s2-overlay.dtbo-base := sdm450-pmi632.dtb \
 	sdm632.dtb \
 	sdm632-pm8004.dtb \
@@ -490,9 +485,6 @@
 sdm450-qrd-sku4-overlay.dtbo-base := sdm450-pmi632.dtb \
 	sdm632.dtb \
 	sdm632-pm8004.dtb
-
-sdm450-no-pmi-mtp-overlay.dtbo-base := sdm450-no-pmi.dtb
-
 sdm632-rumi-overlay.dtbo-base := sdm632.dtb
 sdm632-ext-codec-cdp-s3-overlay.dtbo-base := sdm632.dtb \
 	sdm632-pm8004.dtb
@@ -563,8 +555,7 @@
 	msm8953-pmi8940-ext-codec-mtp.dtb \
 	msm8953-pmi8937-ext-codec-mtp.dtb \
 	msm8953-pmi632-cdp-s2.dtb \
-	apq8053-batcam.dtb \
-	msm8953-no-pmi.dtb
+	apq8053-batcam.dtb
 
 dtb-$(CONFIG_ARCH_MSM8937) += msm8937-pmi8950-mtp.dtb \
 	msm8937-interposer-sdm439-cdp.dtb \
@@ -628,8 +619,7 @@
 	sdm450-pmi632-cdp-s2.dtb \
 	sdm450-pmi632-mtp-s3.dtb \
 	sda450-pmi632-cdp-s2.dtb \
-	sda450-pmi632-mtp-s3.dtb \
-	sdm450-no-pmi-mtp.dtb
+	sda450-pmi632-mtp-s3.dtb
 
 dtb-$(CONFIG_ARCH_SDM632) += sdm632-rumi.dtb \
 	sdm632-cdp-s2.dtb \
diff --git a/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts b/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
index 103ef7f..f57383c 100644
--- a/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
+++ b/arch/arm64/boot/dts/qcom/apq8009-robot-som-refboard.dts
@@ -387,6 +387,10 @@
 	status = "okay";
 };
 
+&mdss_fb0 {
+	/delete-node/ qcom,cont-splash-memory;
+};
+
 &mdss_mdp {
 	qcom,mdss-pref-prim-intf = "dsi";
 	status = "okay";
@@ -436,3 +440,5 @@
 		status = "okay";
 	};
 };
+
+/delete-node/ &cont_splash_mem;
diff --git a/arch/arm64/boot/dts/qcom/msm8917.dtsi b/arch/arm64/boot/dts/qcom/msm8917.dtsi
index 0673013..4ee2500 100644
--- a/arch/arm64/boot/dts/qcom/msm8917.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917.dtsi
@@ -142,6 +142,12 @@
 		cont_splash_mem: splash_region@83000000 {
 			reg = <0x0 0x90000000 0x0 0x1400000>;
 		};
+
+		dump_mem: mem_dump_region {
+			compatible = "shared-dma-pool";
+			reusable;
+			size = <0 0x400000>;
+		};
 	};
 
 	soc: soc { };
@@ -229,6 +235,47 @@
 
 	thermal_zones: thermal-zones {};
 
+	mem_dump {
+		compatible = "qcom,mem-dump";
+		memory-region = <&dump_mem>;
+
+		rpm_sw_dump {
+			qcom,dump-size = <0x28000>;
+			qcom,dump-id = <0xea>;
+		};
+
+		pmic_dump {
+			qcom,dump-size = <0x10000>;
+			qcom,dump-id = <0xe4>;
+		};
+
+		vsense_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0xe9>;
+		};
+
+		tmc_etf_dump {
+			qcom,dump-size = <0x10000>;
+			qcom,dump-id = <0xf0>;
+		};
+
+		tmc_etr_reg_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0x100>;
+		};
+
+		tmc_etf_reg_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0x101>;
+		};
+
+		misc_data_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0xe8>;
+		};
+
+	};
+
 	tsens0: tsens@4a8000 {
 		compatible = "qcom,msm8937-tsens";
 		reg = <0x4a8000 0x1000>,
diff --git a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
index b9c901c4..f3e13c0 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-mtp.dtsi
@@ -159,6 +159,7 @@
 		qcom,csiphy-sd-index = <0>;
 		qcom,csid-sd-index = <0>;
 		qcom,mount-angle = <270>;
+		qcom,led-flash-src = <&led_flash0>;
 		qcom,eeprom-src = <&eeprom0>;
 		qcom,actuator-src = <&actuator0>;
 		cam_vio-supply = <&pm8953_l6>;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-qrd.dtsi
index be954d7..051b8b7 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-camera-sensor-qrd.dtsi
@@ -33,7 +33,7 @@
 		qcom,cci-master = <0>;
 		reg = <0x0>;
 		cam_vio-supply = <&pm8953_l6>;
-		cam_vdig-supply = <&pm8953_l23>;
+		cam_vdig-supply = <&pm8953_l2>;
 		cam_vaf-supply = <&pm8953_l17>;
 		qcom,cam-vreg-name = "cam_vio", "cam_vdig", "cam_vaf";
 		qcom,cam-vreg-min-voltage = <0 1200000 2850000>;
@@ -111,7 +111,7 @@
 		qcom,eeprom-src = <&eeprom0>;
 		qcom,actuator-src = <&actuator0>;
 		cam_vio-supply = <&pm8953_l6>;
-		cam_vdig-supply = <&pm8953_l23>;
+		cam_vdig-supply = <&pm8953_l2>;
 		cam_vaf-supply = <&pm8953_l17>;
 		cam_vana-supply = <&pm8953_l22>;
 		qcom,cam-vreg-name = "cam_vio", "cam_vdig", "cam_vaf",
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
index 1b09b3c..350d88a 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-ext-codec-mtp-overlay.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
 	qcom,battery-data = <&mtp_batterydata>;
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
+	qcom,usbin-vadc = <&pmi8950_vadc>;
 };
 
 &int_codec {
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
index 00614b2..92cd98d 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp-overlay.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -44,4 +44,5 @@
 	qcom,battery-data = <&mtp_batterydata>;
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
+	qcom,usbin-vadc = <&pmi8950_vadc>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
index 9b20013..3dfe202 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-mtp.dts
@@ -48,9 +48,5 @@
 	qcom,battery-data = <&mtp_batterydata>;
 	qcom,chg-led-sw-controls;
 	qcom,chg-led-support;
-};
-&cci {
-	 qcom,camera@0 {
-		qcom,led-flash-src = <&led_flash0>;
-	};
+	qcom,usbin-vadc = <&pmi8950_vadc>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts
index 7f5fc4e..c8930b9 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd-overlay.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018, 2020 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,3 +20,74 @@
 	model = "QRD SKU3";
 	qcom,board-id = <0xb 0>;
 };
+
+&int_codec {
+	status = "disabled";
+};
+
+&pmic_analog_codec {
+	status = "disabled";
+};
+
+&cdc_pri_mi2s_gpios {
+	status = "disabled";
+};
+
+&cdc_comp_gpios {
+	status = "disabled";
+};
+
+&slim_msm {
+	status = "okay";
+};
+
+&dai_slim {
+	status = "okay";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&clock_audio {
+	status = "okay";
+};
+
+&wcd9335 {
+	status = "okay";
+};
+
+&cdc_us_euro_sw {
+	status = "okay";
+};
+
+&wcd_rst_gpio {
+	status = "okay";
+};
+
+&ext_codec {
+	qcom,model = "msm8953-sku3-tasha-snd-card";
+	status = "okay";
+
+	qcom,audio-routing =
+		"AIF4 VI", "MCLK",
+		"RX_BIAS", "MCLK",
+		"DMIC0", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic0",
+		"AMIC2", "MIC BIAS2",
+		"MIC BIAS2", "Headset Mic",
+		"DMIC2", "MIC BIAS3",
+		"MIC BIAS3", "Digital Mic2",
+		"MIC BIAS1", "MICBIAS_REGULATOR",
+		"MIC BIAS2", "MICBIAS_REGULATOR",
+		"MIC BIAS3", "MICBIAS_REGULATOR",
+		"SpkrLeft IN", "SPK1 OUT";
+
+	qcom,cdc-us-euro-gpios;
+
+	qcom,msm-mbhc-hphl-swh = <1>;
+
+	qcom,wsa-max-devs = <1>;
+	qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_213>;
+	qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts
index 5d892fd..fc8493e 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,9 @@
 /dts-v1/;
 
 #include "msm8953.dtsi"
+#include "pmi8950.dtsi"
 #include "msm8953-qrd-sku3.dtsi"
+#include "msm8953-pmi8950.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM8953 + PMI8950 QRD SKU3";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi
index 7ff8cf2..27107e4 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd-sku3.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2017,2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,7 @@
  */
 
 #include "msm8953-qrd.dtsi"
+#include "msm8953-camera-sensor-qrd.dtsi"
 
 &spmi_bus {
 	qcom,pmi8950@3 {
diff --git a/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
index 2115dcb..dcdfd10 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-qrd.dtsi
@@ -16,7 +16,32 @@
 
 &soc {
 	i2c@78b7000 { /* BLSP1 QUP3 */
-		/delete-node/ synaptics@4b;
+		status = "okay";
+		synaptics@4b {
+			compatible = "synaptics,dsx-i2c";
+			reg = <0x4b>;
+			interrupt-parent = <&tlmm>;
+			interrupts = <65 0x2008>;
+			vdd_ana-supply = <&vdd_vreg>;
+			vcc_i2c-supply = <&pm8953_l6>;
+			synaptics,pwr-reg-name = "vdd_ana";
+			synaptics,bus-reg-name = "vcc_i2c";
+			synaptics,irq-gpio = <&tlmm 65 0x2008>;
+			synaptics,irq-on-state = <0>;
+			synaptics,irq-flags = <0x2008>;
+			synaptics,power-delay-ms = <200>;
+			synaptics,reset-delay-ms = <200>;
+			synaptics,max-y-for-2d = <1919>;
+			synaptics,cap-button-codes = <139 158 172>;
+			synaptics,vir-button-codes = <139 180 2000 320 160
+						      158 540 2000 320 160
+						      172 900 2000 320 160>;
+			synaptics,resume-in-workqueue;
+			/* Underlying clocks used by secure touch */
+			clock-names = "iface_clk", "core_clk";
+			clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>,
+				 <&clock_gcc clk_gcc_blsp1_qup3_i2c_apps_clk>;
+		};
 	};
 
 	vdd_vreg: vdd_vreg {
diff --git a/arch/arm64/boot/dts/qcom/pm660-rpm-regulator.dtsi b/arch/arm64/boot/dts/qcom/pm660-rpm-regulator.dtsi
index 010694d..5896615 100644
--- a/arch/arm64/boot/dts/qcom/pm660-rpm-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660-rpm-regulator.dtsi
@@ -112,7 +112,7 @@
 		qcom,resource-name = "ldoa";
 		qcom,resource-id = <1>;
 		qcom,regulator-type = <0>;
-		qcom,hpm-min-load = <10000>;
+		qcom,hpm-min-load = <30000>;
 		status = "disabled";
 
 		regulator-l1 {
@@ -128,7 +128,7 @@
 		qcom,resource-name = "ldoa";
 		qcom,resource-id = <2>;
 		qcom,regulator-type = <0>;
-		qcom,hpm-min-load = <10000>;
+		qcom,hpm-min-load = <30000>;
 		status = "disabled";
 
 		regulator-l2 {
@@ -144,7 +144,7 @@
 		qcom,resource-name = "ldoa";
 		qcom,resource-id = <3>;
 		qcom,regulator-type = <0>;
-		qcom,hpm-min-load = <10000>;
+		qcom,hpm-min-load = <30000>;
 		status = "disabled";
 
 		regulator-l3 {
@@ -160,7 +160,7 @@
 		qcom,resource-name = "ldoa";
 		qcom,resource-id = <5>;
 		qcom,regulator-type = <0>;
-		qcom,hpm-min-load = <10000>;
+		qcom,hpm-min-load = <30000>;
 		status = "disabled";
 
 		regulator-l5 {
@@ -176,7 +176,7 @@
 		qcom,resource-name = "ldoa";
 		qcom,resource-id = <6>;
 		qcom,regulator-type = <0>;
-		qcom,hpm-min-load = <10000>;
+		qcom,hpm-min-load = <30000>;
 		status = "disabled";
 
 		regulator-l6 {
@@ -192,7 +192,7 @@
 		qcom,resource-name = "ldoa";
 		qcom,resource-id = <7>;
 		qcom,regulator-type = <0>;
-		qcom,hpm-min-load = <10000>;
+		qcom,hpm-min-load = <30000>;
 		status = "disabled";
 
 		regulator-l7 {
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index ed6ca20..3f3c8da 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -18,52 +18,40 @@
 	qcom,msm-id = <347 0x0>;
 };
 
-&removed_region {
-	reg = <0 0x85fc0000 0 0x1540000>;
-};
-
-&pil_camera_mem {
-	reg = <0 0x8b800000 0 0x500000>;
-};
-
 &pil_modem_mem {
-	reg = <0 0x8bd00000 0 0x3100000>;
+	reg = <0 0x8b000000 0 0x3100000>;
 };
 
 &pil_video_mem {
-	reg = <0 0x8ee00000 0 0x500000>;
+	reg = <0 0x8e100000 0 0x500000>;
 };
 
 &wlan_msa_mem {
-	reg = <0 0x8f300000 0 0x100000>;
+	reg = <0 0x8e600000 0 0x100000>;
 };
 
 &pil_cdsp_mem {
-	reg = <0 0x8f400000 0 0x800000>;
+	reg = <0 0x8e700000 0 0x800000>;
 };
 
 &pil_mba_mem {
-	reg = <0 0x8fc00000 0 0x200000>;
+	reg = <0 0x8ef00000 0 0x200000>;
 };
 
 &pil_adsp_mem {
-	reg = <0 0x8fe00000 0 0x1e00000>;
+	reg = <0 0x8f100000 0 0x1e00000>;
 };
 
 &pil_ipa_fw_mem {
-	reg = <0 0x91c00000 0 0x10000>;
+	reg = <0 0x90f00000 0 0x10000>;
 };
 
 &pil_ipa_gsi_mem {
-	reg = <0 0x91c10000 0 0x5000>;
+	reg = <0 0x90f10000 0 0x5000>;
 };
 
 &pil_gpu_mem {
-	reg = <0 0x91c15000 0 0x2000>;
-};
-
-&qseecom_mem {
-	reg = <0 0x9e800000 0 0x1000000>;
+	reg = <0 0x90f15000 0 0x2000>;
 };
 
 &adsp_mem {
@@ -71,7 +59,7 @@
 };
 
 &secure_display_memory {
-	status = "disabled";
+	size = <0 0xa000000>;
 };
 
 &qcom_seecom {
diff --git a/arch/arm64/boot/dts/qcom/sda429-bg-wdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda429-bg-wdp-overlay.dts
index f895f17..0ddd81d 100644
--- a/arch/arm64/boot/dts/qcom/sda429-bg-wdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda429-bg-wdp-overlay.dts
@@ -24,6 +24,118 @@
 	qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>;
 };
 
+&soc {
+
+	qcom,blackghost {
+		compatible = "qcom,pil-blackghost";
+		qcom,pil-force-shutdown;
+		qcom,firmware-name = "bg-wear";
+		/* GPIO inputs from blackghost */
+		qcom,bg2ap-status-gpio = <&tlmm 44 0>;
+		qcom,bg2ap-errfatal-gpio = <&tlmm 72 0>;
+		/* GPIO output to blackghost */
+		qcom,ap2bg-status-gpio = <&tlmm 61 0>;
+		qcom,ap2bg-errfatal-gpio = <&tlmm 62 0>;
+	};
+
+	qcom,msm-ssc-sensors {
+		compatible = "qcom,msm-ssc-sensors";
+	};
+
+	qcom,glink-bgcom-xprt-bg {
+		compatible = "qcom,glink-bgcom-xprt";
+		label = "bg";
+		qcom,qos-config = <&glink_qos_bg>;
+		qcom,ramp-time = <0x10>,
+				     <0x20>,
+				     <0x30>,
+				     <0x40>;
+	};
+
+	glink_qos_bg: qcom,glink-qos-config-bg {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x80 0x0>,
+				 <0x70 0x1>,
+				 <0x60 0x2>,
+				 <0x50 0x3>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
+
+	qcom,glink_pkt {
+		compatible = "qcom,glinkpkt";
+
+		qcom,glinkpkt-bg-daemon {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "bg-daemon";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_daemon";
+		};
+
+		qcom,glinkpkt-bg-display-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "display-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_display_ctrl";
+		};
+
+		qcom,glinkpkt-bg-display-data {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "display-data";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_display_data";
+		};
+
+		qcom,glinkpkt-bg-rsb-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "RSB_CTRL";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_rsb_ctrl";
+		};
+
+		qcom,glinkpkt-bg-sso-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "sso-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_sso_ctrl";
+		};
+
+		qcom,glinkpkt-bg-buzzer-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "buzzer-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_buzzer_ctrl";
+		};
+	};
+
+	spi_3: spi@78b7000 {  /* BLSP1 QUP3*/
+		status = "ok";
+		qcom,bg-spi {
+			compatible = "qcom,bg-spi";
+			reg = <0>;
+			spi-max-frequency = <16000000>;
+			interrupt-parent = <&tlmm>;
+			qcom,irq-gpio = <&tlmm 43 1>;
+		};
+	};
+
+	i2c_3: i2c@78b7000 { /* BLSP1 QUP3 */
+		status = "disabled";
+	};
+
+	qcom,bg-rsb {
+		compatible = "qcom,bg-rsb";
+		vdd-ldo1-supply = <&pm660_l11>;
+	};
+
+	qcom,bg-daemon {
+		compatible = "qcom,bg-daemon";
+		qcom,bg-reset-gpio = <&pm660_gpios 5 0>;
+		ssr-reg1-supply = <&pm660_l3>;
+		ssr-reg2-supply = <&pm660_l9>;
+	};
+};
+
 &usb_otg {
 	HSUSB_3p3-supply = <&L16A>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sda429-bg-wdp.dts b/arch/arm64/boot/dts/qcom/sda429-bg-wdp.dts
index af32f0c..9746c0c 100644
--- a/arch/arm64/boot/dts/qcom/sda429-bg-wdp.dts
+++ b/arch/arm64/boot/dts/qcom/sda429-bg-wdp.dts
@@ -14,6 +14,7 @@
 /dts-v1/;
 
 #include "sdm429-spyro.dtsi"
+#include "sdm429w-bg-pm660.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDA429 QRD BG WDP Spyro";
diff --git a/arch/arm64/boot/dts/qcom/sda429-bg-wtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda429-bg-wtp-overlay.dts
index 9602dd8..8d2e157 100644
--- a/arch/arm64/boot/dts/qcom/sda429-bg-wtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda429-bg-wtp-overlay.dts
@@ -24,6 +24,118 @@
 	qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>;
 };
 
+&soc {
+
+	qcom,blackghost {
+		compatible = "qcom,pil-blackghost";
+		qcom,pil-force-shutdown;
+		qcom,firmware-name = "bg-wear";
+		/* GPIO inputs from blackghost */
+		qcom,bg2ap-status-gpio = <&tlmm 44 0>;
+		qcom,bg2ap-errfatal-gpio = <&tlmm 72 0>;
+		/* GPIO output to blackghost */
+		qcom,ap2bg-status-gpio = <&tlmm 61 0>;
+		qcom,ap2bg-errfatal-gpio = <&tlmm 62 0>;
+	};
+
+	qcom,msm-ssc-sensors {
+		compatible = "qcom,msm-ssc-sensors";
+	};
+
+	qcom,glink-bgcom-xprt-bg {
+		compatible = "qcom,glink-bgcom-xprt";
+		label = "bg";
+		qcom,qos-config = <&glink_qos_bg>;
+		qcom,ramp-time = <0x10>,
+				 <0x20>,
+				 <0x30>,
+				 <0x40>;
+	};
+
+	glink_qos_bg: qcom,glink-qos-config-bg {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x80 0x0>,
+				 <0x70 0x1>,
+				 <0x60 0x2>,
+				 <0x50 0x3>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
+
+	qcom,glink_pkt {
+		compatible = "qcom,glinkpkt";
+
+		qcom,glinkpkt-bg-daemon {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "bg-daemon";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_daemon";
+		};
+
+		qcom,glinkpkt-bg-display-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "display-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_display_ctrl";
+		};
+
+		qcom,glinkpkt-bg-display-data {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "display-data";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_display_data";
+		};
+
+		qcom,glinkpkt-bg-rsb-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "RSB_CTRL";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_rsb_ctrl";
+		};
+
+		qcom,glinkpkt-bg-sso-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "sso-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_sso_ctrl";
+		};
+
+		qcom,glinkpkt-bg-buzzer-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "buzzer-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_buzzer_ctrl";
+		};
+	};
+
+	spi_3: spi@78b7000 { /* BLSP1 QUP3 */
+		status = "ok";
+		qcom,bg-spi {
+			compatible = "qcom,bg-spi";
+			reg = <0>;
+			spi-max-frequency = <16000000>;
+			interrupt-parent = <&tlmm>;
+			qcom,irq-gpio = <&tlmm 43 1>;
+		};
+	};
+
+	i2c_3: i2c@78b7000 { /* BLSP1 QUP3 */
+		status = "disabled";
+	};
+
+	qcom,bg-rsb {
+		compatible = "qcom,bg-rsb";
+		vdd-ldo1-supply = <&pm660_l11>;
+	};
+
+	qcom,bg-daemon {
+		compatible = "qcom,bg-daemon";
+		qcom,bg-reset-gpio = <&pm660_gpios 5 0>;
+		ssr-reg1-supply = <&pm660_l3>;
+		ssr-reg2-supply = <&pm660_l9>;
+	};
+};
+
 &usb_otg {
 	HSUSB_3p3-supply = <&L16A>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sda429-bg-wtp.dts b/arch/arm64/boot/dts/qcom/sda429-bg-wtp.dts
index e1c3918..2312951 100644
--- a/arch/arm64/boot/dts/qcom/sda429-bg-wtp.dts
+++ b/arch/arm64/boot/dts/qcom/sda429-bg-wtp.dts
@@ -14,6 +14,7 @@
 /dts-v1/;
 
 #include "sdm429-spyro.dtsi"
+#include "sdm429w-bg-pm660.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM429 QRD DVT Spyro";
diff --git a/arch/arm64/boot/dts/qcom/sdm429-bg-iot-wtp.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-iot-wtp.dts
index 2b820b6..46c7469 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-bg-iot-wtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-bg-iot-wtp.dts
@@ -15,6 +15,8 @@
 
 #include "sdm429-spyro.dtsi"
 #include "sdm429-spyro-qrd-evt.dtsi"
+#include "sdm429w-bg-pm660.dtsi"
+
 / {
 	model = "Qualcomm Technologies, Inc. SDM429W BG IOT WTP";
 	compatible = "qcom,sdm429w-qrd", "qcom,sdm429w", "qcom,qrd";
diff --git a/arch/arm64/boot/dts/qcom/sdm429-bg-wdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-wdp-overlay.dts
index a7ce1bd..d78655b 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-bg-wdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-bg-wdp-overlay.dts
@@ -24,6 +24,118 @@
 	qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>;
 };
 
+&soc {
+
+	qcom,blackghost {
+		compatible = "qcom,pil-blackghost";
+		qcom,pil-force-shutdown;
+		qcom,firmware-name = "bg-wear";
+		/* GPIO inputs from blackghost */
+		qcom,bg2ap-status-gpio = <&tlmm 44 0>;
+		qcom,bg2ap-errfatal-gpio = <&tlmm 72 0>;
+		/* GPIO output to blackghost */
+		qcom,ap2bg-status-gpio = <&tlmm 61 0>;
+		qcom,ap2bg-errfatal-gpio = <&tlmm 62 0>;
+	};
+
+	qcom,msm-ssc-sensors {
+		compatible = "qcom,msm-ssc-sensors";
+	};
+
+	qcom,glink-bgcom-xprt-bg {
+		compatible = "qcom,glink-bgcom-xprt";
+		label = "bg";
+		qcom,qos-config = <&glink_qos_bg>;
+		qcom,ramp-time = <0x10>,
+				 <0x20>,
+				 <0x30>,
+				 <0x40>;
+	};
+
+	glink_qos_bg: qcom,glink-qos-config-bg {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x80 0x0>,
+				 <0x70 0x1>,
+				 <0x60 0x2>,
+				 <0x50 0x3>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
+
+	qcom,glink_pkt {
+		compatible = "qcom,glinkpkt";
+
+		qcom,glinkpkt-bg-daemon {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "bg-daemon";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_daemon";
+		};
+
+		qcom,glinkpkt-bg-display-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "display-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_display_ctrl";
+		};
+
+		qcom,glinkpkt-bg-display-data {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "display-data";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_display_data";
+		};
+
+		qcom,glinkpkt-bg-rsb-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "RSB_CTRL";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_rsb_ctrl";
+		};
+
+		qcom,glinkpkt-bg-sso-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "sso-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_sso_ctrl";
+		};
+
+		qcom,glinkpkt-bg-buzzer-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "buzzer-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_buzzer_ctrl";
+		};
+	};
+
+	spi_3: spi@78b7000 { /* BLSP1 QUP3 */
+		status = "ok";
+		qcom,bg-spi {
+			compatible = "qcom,bg-spi";
+			reg = <0>;
+			spi-max-frequency = <16000000>;
+			interrupt-parent = <&tlmm>;
+			qcom,irq-gpio = <&tlmm 43 1>;
+		};
+	};
+
+	i2c_3: i2c@78b7000 { /* BLSP1 QUP3 */
+		status = "disabled";
+	};
+
+	qcom,bg-rsb {
+		compatible = "qcom,bg-rsb";
+		vdd-ldo1-supply = <&pm660_l11>;
+	};
+
+	qcom,bg-daemon {
+		compatible = "qcom,bg-daemon";
+		qcom,bg-reset-gpio = <&pm660_gpios 5 0>;
+		ssr-reg1-supply = <&pm660_l3>;
+		ssr-reg2-supply = <&pm660_l9>;
+	};
+};
+
 &usb_otg {
 	HSUSB_3p3-supply = <&L16A>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm429-bg-wdp.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-wdp.dts
index 74159c4..26ed8a7 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-bg-wdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-bg-wdp.dts
@@ -14,6 +14,7 @@
 /dts-v1/;
 
 #include "sdm429-spyro.dtsi"
+#include "sdm429w-bg-pm660.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM429 QRD BG WDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm429-bg-wtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-wtp-overlay.dts
index 96c2385..6502a3e 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-bg-wtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-bg-wtp-overlay.dts
@@ -24,6 +24,118 @@
 	qcom,pmic-id = <0x0002001b 0x0 0x0 0x0>;
 };
 
+&soc {
+
+	qcom,blackghost {
+		compatible = "qcom,pil-blackghost";
+		qcom,pil-force-shutdown;
+		qcom,firmware-name = "bg-wear";
+		/* GPIO inputs from blackghost */
+		qcom,bg2ap-status-gpio = <&tlmm 44 0>;
+		qcom,bg2ap-errfatal-gpio = <&tlmm 72 0>;
+		/* GPIO output to blackghost */
+		qcom,ap2bg-status-gpio = <&tlmm 61 0>;
+		qcom,ap2bg-errfatal-gpio = <&tlmm 62 0>;
+	};
+
+	qcom,msm-ssc-sensors {
+		compatible = "qcom,msm-ssc-sensors";
+	};
+
+	qcom,glink-bgcom-xprt-bg {
+		compatible = "qcom,glink-bgcom-xprt";
+		label = "bg";
+		qcom,qos-config = <&glink_qos_bg>;
+		qcom,ramp-time = <0x10>,
+				 <0x20>,
+				 <0x30>,
+				 <0x40>;
+	};
+
+	glink_qos_bg: qcom,glink-qos-config-bg {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x80 0x0>,
+				 <0x70 0x1>,
+				 <0x60 0x2>,
+				 <0x50 0x3>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
+
+	qcom,glink_pkt {
+		compatible = "qcom,glinkpkt";
+
+		qcom,glinkpkt-bg-daemon {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "bg-daemon";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_daemon";
+		};
+
+		qcom,glinkpkt-bg-display-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "display-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_display_ctrl";
+		};
+
+		qcom,glinkpkt-bg-display-data {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "display-data";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_display_data";
+		};
+
+		qcom,glinkpkt-bg-rsb-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "RSB_CTRL";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_rsb_ctrl";
+		};
+
+		qcom,glinkpkt-bg-sso-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "sso-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_sso_ctrl";
+		};
+
+		qcom,glinkpkt-bg-buzzer-ctrl {
+			qcom,glinkpkt-transport = "bgcom";
+			qcom,glinkpkt-edge = "bg";
+			qcom,glinkpkt-ch-name = "buzzer-ctrl";
+			qcom,glinkpkt-dev-name = "glink_pkt_bg_buzzer_ctrl";
+		};
+	};
+
+	spi_3: spi@78b7000 { /* BLSP1 QUP3 */
+		status = "ok";
+		qcom,bg-spi {
+			compatible = "qcom,bg-spi";
+			reg = <0>;
+			spi-max-frequency = <16000000>;
+			interrupt-parent = <&tlmm>;
+			qcom,irq-gpio = <&tlmm 43 1>;
+		};
+	};
+
+	i2c_3: i2c@78b7000 { /* BLSP1 QUP3 */
+		status = "disabled";
+	};
+
+	qcom,bg-rsb {
+		compatible = "qcom,bg-rsb";
+		vdd-ldo1-supply = <&pm660_l11>;
+	};
+
+	qcom,bg-daemon {
+		compatible = "qcom,bg-daemon";
+		qcom,bg-reset-gpio = <&pm660_gpios 5 0>;
+		ssr-reg1-supply = <&pm660_l3>;
+		ssr-reg2-supply = <&pm660_l9>;
+	};
+};
+
 &usb_otg {
 	HSUSB_3p3-supply = <&L16A>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm429-bg-wtp.dts b/arch/arm64/boot/dts/qcom/sdm429-bg-wtp.dts
index 4caa7fe..62294f0 100644
--- a/arch/arm64/boot/dts/qcom/sdm429-bg-wtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429-bg-wtp.dts
@@ -14,6 +14,7 @@
 /dts-v1/;
 
 #include "sdm429-spyro.dtsi"
+#include "sdm429w-bg-pm660.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM429 BG WTP";
diff --git a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts b/arch/arm64/boot/dts/qcom/sdm429w-bg-pm660.dtsi
similarity index 70%
rename from arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts
rename to arch/arm64/boot/dts/qcom/sdm429w-bg-pm660.dtsi
index fa9ccbb..7c389ff 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-no-pmi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm429w-bg-pm660.dtsi
@@ -11,13 +11,21 @@
  * GNU General Public License for more details.
  */
 
-/dts-v1/;
+#include "sdm429w-pm660.dtsi"
 
-#include "msm8953.dtsi"
-#include "msm8953-mtp.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. msm8953 + NO PMI SOC";
-	compatible = "qcom,msm8953";
-	qcom,pmic-id = <0x010016 0x0 0x0 0x0>;
+&pm660_misc {
+	qcom,support-twm-config;
 };
+
+&pm660_pbs {
+	status = "okay";
+};
+
+&pm660_pon {
+	qcom,support-twm-config;
+	qcom,pbs-client = <&pm660_pbs>;
+};
+
+&pm660_fg {
+	qcom,fg-disable-in-twm;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi
index f6d85f4..0dd6035 100644
--- a/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429w-pm660.dtsi
@@ -371,9 +371,10 @@
 /* over-write the PM660 GPIO mappings for 429w */
 &pm660_gpios {
 	interrupts  = <0x0 0xc3 0 IRQ_TYPE_NONE>,
+		      <0x0 0xc4 0 IRQ_TYPE_NONE>,
 		      <0x0 0xcb 0 IRQ_TYPE_NONE>;
-	interrupt-names = "pm660_gpio4", "pm660_gpio12";
-	qcom,gpios-disallowed = <1 2 3 5 6 7 8 9 10 11 13>;
+	interrupt-names = "pm660_gpio4", "pm660_gpio5", "pm660_gpio12";
+	qcom,gpios-disallowed = <1 2 3 6 7 8 9 10 11 13>;
 };
 
 &pm660_vadc {
@@ -761,4 +762,3 @@
 	/delete-property/ vddio-supply;
 	vddio-supply = <&L13A>;
 };
-
diff --git a/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi
index db76a24..6e51013 100644
--- a/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm429w-regulator.dtsi
@@ -265,6 +265,7 @@
 			regulator-min-microvolt = <1600000>;
 			regulator-max-microvolt = <2040000>;
 			qcom,init-voltage = <1600000>;
+			regulator-system-load = <20000>;
 			status = "okay";
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp-overlay.dts
deleted file mode 100644
index cd78c2c..0000000
--- a/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp-overlay.dts
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/dts-v1/;
-/plugin/;
-
-/ {
-	model = " SDM450 NOPMI MTP";
-	qcom,board-id = <8 3>;
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp.dts b/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp.dts
deleted file mode 100644
index 71a9eef..0000000
--- a/arch/arm64/boot/dts/qcom/sdm450-no-pmi-mtp.dts
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/dts-v1/;
-
-#include "sdm450.dtsi"
-#include "msm8953-mtp.dtsi"
-/ {
-	model = "Qualcomm Technologies, Inc. SDM450 + NO PMI MTP S3";
-	compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
-	qcom,board-id = <8 3>;
-	qcom,pmic-id = <0x010016 0x0 0x0 0x0>;
-};
-
-&eeprom0 {
-	cam_vdig-supply = <&pm8953_l23>;
-};
-
-&camera0 {
-	cam_vdig-supply = <&pm8953_l23>;
-};
-
-&pm8953_gpios {
-	bklt_en {
-		bklt_en_default: bklt_en_default {
-		pins = "gpio4";
-		function = "normal";
-		power-source = <0>;
-		output-high;
-		};
-	};
-};
-
-&pm8953_pwm {
-	status = "ok";
-};
-
-&mdss_dsi0 {
-	qcom,dsi-pref-prim-pan = <&dsi_hx8399c_truly_vid>;
-	pinctrl-names = "mdss_default", "mdss_sleep";
-	pinctrl-0 = <&mdss_dsi_active &mdss_te_active &bklt_en_default>;
-	pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
-	qcom,platform-bklight-en-gpio = <&pm8953_gpios 4 0>;
-
-};
-
-&dsi_truly_1080_vid {
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
-	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
-	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
-	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
-};
-
-&dsi_hx8399c_truly_vid {
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
-	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
-	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
-	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-no-pmi.dts b/arch/arm64/boot/dts/qcom/sdm450-no-pmi.dts
deleted file mode 100644
index afb3554..0000000
--- a/arch/arm64/boot/dts/qcom/sdm450-no-pmi.dts
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/dts-v1/;
-
-#include "sdm450.dtsi"
-#include "msm8953-mtp.dtsi"
-/ {
-	model = "Qualcomm Technologies, Inc. SDM450 + NO PMI MTP S3";
-	compatible = "qcom,sdm450-mtp", "qcom,sdm450", "qcom,mtp";
-	qcom,pmic-id = <0x010016 0x0 0x0 0x0>;
-};
-
-&eeprom0 {
-	cam_vdig-supply = <&pm8953_l23>;
-};
-
-&camera0 {
-	cam_vdig-supply = <&pm8953_l23>;
-};
-
-&pm8953_gpios {
-	bklt_en {
-		bklt_en_default: bklt_en_default {
-		pins = "gpio4";
-		function = "normal";
-		power-source = <0>;
-		output-high;
-		};
-	};
-};
-
-&pm8953_pwm {
-	status = "ok";
-};
-
-&mdss_dsi0 {
-	qcom,dsi-pref-prim-pan = <&dsi_hx8399c_truly_vid>;
-	pinctrl-names = "mdss_default", "mdss_sleep";
-	pinctrl-0 = <&mdss_dsi_active &mdss_te_active &bklt_en_default>;
-	pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
-	qcom,platform-bklight-en-gpio = <&pm8953_gpios 4 0>;
-
-};
-
-&dsi_truly_1080_vid {
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
-	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
-	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
-	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
-};
-
-&dsi_hx8399c_truly_vid {
-	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
-	qcom,mdss-dsi-bl-pmic-pwm-frequency = <100>;
-	qcom,mdss-dsi-bl-pmic-bank-select = <0>;
-	qcom,mdss-dsi-pwm-gpio = <&pm8953_gpios 8 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts
index cb01d39..4e9a217 100644
--- a/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts
+++ b/arch/arm64/boot/dts/qcom/sdm450-pmi632.dts
@@ -19,6 +19,6 @@
 / {
 	model = "Qualcomm Technologies, Inc. SDM450 + PMI632 SOC";
 	compatible = "qcom,sdm450";
-	qcom,pmic-id = <0x16 0x25 0x0 0x0>;
+	qcom,pmic-id = <0x010016 0x25 0x0 0x0>;
 	qcom,pmic-name = "PMI632";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
index 1259893..4040623 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-vidc.dtsi
@@ -80,7 +80,7 @@
 				<&apps_smmu 0x10a0 0x8>,
 				<&apps_smmu 0x10b0 0x0>;
 			buffer-types = <0xfff>;
-			virtual-addr-pool = <0x79000000 0x87000000>;
+			virtual-addr-pool = <0x79000000 0x67000000>;
 		};
 
 		secure_bitstream_cb {
@@ -180,7 +180,7 @@
 				<&apps_smmu 0x10a0 0x8>,
 				<&apps_smmu 0x10b0 0x0>;
 			buffer-types = <0xfff>;
-			virtual-addr-pool = <0x79000000 0x87000000>;
+			virtual-addr-pool = <0x79000000 0x67000000>;
 		};
 
 		secure_bitstream_cb {
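
Assuming the virtual-addr-pool cells above are <base size>, the shrink from 0x87000000 to 0x67000000 keeps the non-secure context bank pool inside the 32-bit virtual address space: 0x79000000 + 0x87000000 wraps past 4 GB, while 0x79000000 + 0x67000000 ends at 0xE0000000. A hypothetical stand-alone check of that arithmetic (not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t base = 0x79000000;

	/* Old size: 0x79000000 + 0x87000000 = 0x100000000, past 4 GB. */
	assert(base + 0x87000000ULL > 0xFFFFFFFFULL);
	/* New size: 0x79000000 + 0x67000000 = 0xE0000000, in bounds. */
	assert(base + 0x67000000ULL == 0xE0000000ULL);
	return 0;
}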
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 4374ef3..78f1f97 100755
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -321,6 +321,10 @@
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_HBTP_INPUT=y
 CONFIG_INPUT_QPNP_POWER_ON=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 66ae8cf..f30592c 100755
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -328,6 +328,10 @@
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v26=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v26=y
+CONFIG_SECURE_TOUCH_SYNAPTICS_DSX_V26=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_HBTP_INPUT=y
 CONFIG_INPUT_QPNP_POWER_ON=y
diff --git a/arch/arm64/configs/qcs605-perf_defconfig b/arch/arm64/configs/qcs605-perf_defconfig
new file mode 100644
index 0000000..775ac0c
--- /dev/null
+++ b/arch/arm64/configs/qcs605-perf_defconfig
@@ -0,0 +1,658 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM670=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_PROCESS_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_PSCI_BP_HARDENING=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_AQT_REGMAP=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_SKY2=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_LAN78XX=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_SDM670=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_NX30P6093=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_SMB2=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_SMB1390_CHARGE_PUMP=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_AOP_REG_COOLING_DEVICE=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP_OLEDB=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_REFGEN=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SPECTRA_CAMERA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_SW=y
+CONFIG_QCOM_KGSL=y
+CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
+CONFIG_DRM_LT_LT9611=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC2=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_UVC=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_CQ_HCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_QPNP_REVID=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_11AD=m
+CONFIG_SEEMP_CORE=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_MSM_GCC_SDM845=y
+CONFIG_MSM_VIDEOCC_SDM845=y
+CONFIG_MSM_CAMCC_SDM845=y
+CONFIG_MSM_DISPCC_SDM845=y
+CONFIG_CLOCK_QPNP_DIV=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_QCOM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_QCOM_QCS605_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_ICNSS=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_MSM_QBT1000=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_MSM_REMOTEQDSS=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOMCCI_HWMON=y
+CONFIG_QCOM_M4M_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_NVMEM=y
+# CONFIG_NVMEM_SYSFS is not set
+CONFIG_QCOM_QFPROM=y
+CONFIG_SENSORS_SSC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/qcs605_defconfig b/arch/arm64/configs/qcs605_defconfig
new file mode 100644
index 0000000..3b7319c
--- /dev/null
+++ b/arch/arm64/configs/qcs605_defconfig
@@ -0,0 +1,721 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_BLK_CGROUP=y
+CONFIG_DEBUG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM670=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_PROCESS_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
+CONFIG_PSCI_BP_HARDENING=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_AQT_REGMAP=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_LAN78XX=y
+CONFIG_USB_USBNET=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_HBTP_INPUT=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
+CONFIG_DIAG_CHAR=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_SDM670=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_NX30P6093=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_SMB2=y
+CONFIG_QPNP_QNOVO=y
+CONFIG_SMB1390_CHARGE_PUMP=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_AOP_REG_COOLING_DEVICE=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP_OLEDB=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_REFGEN=y
+CONFIG_REGULATOR_RPMH=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SPECTRA_CAMERA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_SW=y
+CONFIG_QCOM_KGSL=y
+CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
+CONFIG_DRM_LT_LT9611=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_UAC2=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_UVC=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_CQ_HCI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_KRYO3XX_ARM64=y
+CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_QCOM_GPI_DMA_DEBUG=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_QPNP_REVID=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_11AD=m
+CONFIG_SEEMP_CORE=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_MSM_GCC_SDM845=y
+CONFIG_MSM_VIDEOCC_SDM845=y
+CONFIG_MSM_CAMCC_SDM845=y
+CONFIG_MSM_DISPCC_SDM845=y
+CONFIG_CLOCK_QPNP_DIV=y
+CONFIG_MSM_CLK_RPMH=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_QCOM_MDSS_PLL=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_SDM670_LLCC=y
+CONFIG_QCOM_QCS605_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_ERP=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_WDOG_IPI_ENABLE=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_MSM_QBT1000=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_MSM_REMOTEQDSS=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOMCCI_HWMON=y
+CONFIG_QCOM_M4M_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_QCOM_RRADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_NVMEM=y
+# CONFIG_NVMEM_SYSFS is not set
+CONFIG_QCOM_QFPROM=y
+CONFIG_SENSORS_SSC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_MODULE_LOAD_INFO=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_PREEMPTIRQ_EVENTS=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_LKDTM=y
+CONFIG_MEMTEST=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_ARM64_PTDUMP=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_TGU=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_XZ_DEC=y
+CONFIG_QMI_ENCDEC=y
diff --git a/certs/Makefile b/certs/Makefile
index 2773c4a..9d89b99 100644
--- a/certs/Makefile
+++ b/certs/Makefile
@@ -17,6 +17,10 @@
 quiet_cmd_extract_certs  = EXTRACT_CERTS   $(patsubst "%",%,$(2))
       cmd_extract_certs  = scripts/extract-cert $(2) $@ || ( rm $@; exit 1)
 
+ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYS),"verity.x509.pem")
+SYSTEM_TRUSTED_KEYS_SRCPREFIX := $(srctree)/certs/
+endif
+
 targets += x509_certificate_list
 $(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE
 	$(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS))
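
The ifeq above only fires when CONFIG_SYSTEM_TRUSTED_KEYS is the literal string "verity.x509.pem"; in that case extract-cert resolves the certificate relative to $(srctree)/certs/, so the key bundled below is found even for out-of-tree (O=) builds. A minimal defconfig fragment that would exercise this branch (assumed usage, not mandated by the patch):

CONFIG_SYSTEM_TRUSTED_KEYRING=y
CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"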
diff --git a/certs/verity.x509.pem b/certs/verity.x509.pem
new file mode 100644
index 0000000..86399c3
--- /dev/null
+++ b/certs/verity.x509.pem
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIID/TCCAuWgAwIBAgIJAJcPmDkJqolJMA0GCSqGSIb3DQEBBQUAMIGUMQswCQYD
+VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4g
+VmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UE
+AwwHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe
+Fw0xNDExMDYxOTA3NDBaFw00MjAzMjQxOTA3NDBaMIGUMQswCQYDVQQGEwJVUzET
+MBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4gVmlldzEQMA4G
+A1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UEAwwHQW5kcm9p
+ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAOjreE0vTVSRenuzO9vnaWfk0eQzYab0gqpi
+6xAzi6dmD+ugoEKJmbPiuE5Dwf21isZ9uhUUu0dQM46dK4ocKxMRrcnmGxydFn6o
+fs3ODJMXOkv2gKXL/FdbEPdDbxzdu8z3yk+W67udM/fW7WbaQ3DO0knu+izKak/3
+T41c5uoXmQ81UNtAzRGzGchNVXMmWuTGOkg6U+0I2Td7K8yvUMWhAWPPpKLtVH9r
+AL5TzjYNR92izdKcz3AjRsI3CTjtpiVABGeX0TcjRSuZB7K9EK56HV+OFNS6I1NP
+jdD7FIShyGlqqZdUOkAUZYanbpgeT5N7QL6uuqcGpoTOkalu6kkCAwEAAaNQME4w
+HQYDVR0OBBYEFH5DM/m7oArf4O3peeKO0ZIEkrQPMB8GA1UdIwQYMBaAFH5DM/m7
+oArf4O3peeKO0ZIEkrQPMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AHO3NSvDE5jFvMehGGtS8BnFYdFKRIglDMc4niWSzhzOVYRH4WajxdtBWc5fx0ix
+NF/+hVKVhP6AIOQa+++sk+HIi7RvioPPbhjcsVlZe7cUEGrLSSveGouQyc+j0+m6
+JF84kszIl5GGNMTnx0XRPO+g8t6h5LWfnVydgZfpGRRg+WHewk1U2HlvTjIceb0N
+dcoJ8WKJAFWdcuE7VIm4w+vF/DYX/A2Oyzr2+QRhmYSv1cusgAeC1tvH4ap+J1Lg
+UnOu5Kh/FqPLLSwNVQp4Bu7b9QFfqK8Moj84bj88NqRGZgDyqzuTrFxn6FW7dmyA
+yttuAJAEAymk1mipd9+zp38=
+-----END CERTIFICATE-----
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index a803e72..b3ec52c 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -509,7 +509,7 @@
 static int bt_dt_parse_clk_info(struct device *dev,
 		struct bt_power_clk_data **clk_data)
 {
-	int ret = -EINVAL;
+	int ret = 0;
 	struct bt_power_clk_data *clk = NULL;
 	struct device_node *np = dev->of_node;
 
@@ -546,7 +546,7 @@
 
 		*clk_data = clk;
 	} else {
-		BT_PWR_ERR("clocks is not provided in device tree");
+		BT_PWR_INFO("clocks is not provided in device tree");
 	}
 
 err:
@@ -567,7 +567,7 @@
 			of_get_named_gpio(pdev->dev.of_node,
 						"qca,bt-reset-gpio", 0);
 		if (bt_power_pdata->bt_gpio_sys_rst < 0)
-			BT_PWR_ERR("bt-reset-gpio not provided in device tree");
+			BT_PWR_INFO("bt-reset-gpio not provided in devicetree");
 
 		rc = bt_dt_parse_vreg_info(&pdev->dev,
 					&bt_power_pdata->bt_vdd_core,
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 7875105..d8cdd34 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -167,4 +167,25 @@
 	help
 	  Platform configuration infrastructure for the ARM Ltd.
 	  Versatile Express.
+
+config MHI_BUS
+	tristate "Modem Host Interface"
+	help
+	  MHI Host Interface is a communication protocol used by the host
+	  to control and communicate with a modem over a high speed
+	  peripheral bus. Enabling this module will allow the host to
+	  communicate with external devices that support the MHI protocol.
+
+config MHI_DEBUG
+	bool "MHI debug support"
+	depends on MHI_BUS
+	help
+	  Say yes here to enable debugging support in the MHI transport
+	  and individual MHI client drivers. This option will impact
+	  throughput as individual MHI packets and state transitions
+	  will be logged.
+
+source drivers/bus/mhi/controllers/Kconfig
+source drivers/bus/mhi/devices/Kconfig
+
 endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index c6cfa6b..6b1df6b 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -21,3 +21,4 @@
 obj-$(CONFIG_TEGRA_ACONNECT)	+= tegra-aconnect.o
 obj-$(CONFIG_UNIPHIER_SYSTEM_BUS)	+= uniphier-system-bus.o
 obj-$(CONFIG_VEXPRESS_CONFIG)	+= vexpress-config.o
+obj-$(CONFIG_MHI_BUS)	+= mhi/
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
new file mode 100644
index 0000000..2382e04
--- /dev/null
+++ b/drivers/bus/mhi/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the MHI stack
+#
+
+# core layer
+obj-y += core/
+obj-y += controllers/
+obj-y += devices/
diff --git a/drivers/bus/mhi/controllers/Kconfig b/drivers/bus/mhi/controllers/Kconfig
new file mode 100644
index 0000000..e8a90b6
--- /dev/null
+++ b/drivers/bus/mhi/controllers/Kconfig
@@ -0,0 +1,10 @@
+menu "MHI controllers"
+
+config MHI_QCOM
+	tristate "MHI QCOM"
+	depends on MHI_BUS
+	help
+	  If you say yes to this option, MHI bus support for QCOM modem
+	  chipsets will be enabled.
+
+endmenu
diff --git a/drivers/bus/mhi/controllers/Makefile b/drivers/bus/mhi/controllers/Makefile
new file mode 100644
index 0000000..9cb38a5
--- /dev/null
+++ b/drivers/bus/mhi/controllers/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MHI_QCOM) += mhi_qcom.o mhi_arch_qcom.o
diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
new file mode 100644
index 0000000..0da2863
--- /dev/null
+++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
@@ -0,0 +1,770 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/dma-iommu.h>
+#include <linux/async.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/esoc_client.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_pcie.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/mhi.h>
+#include "mhi_qcom.h"
+
+struct arch_info {
+	struct mhi_dev *mhi_dev;
+	struct esoc_desc *esoc_client;
+	struct esoc_client_hook esoc_ops;
+	struct msm_bus_scale_pdata *msm_bus_pdata;
+	u32 bus_client;
+	struct msm_pcie_register_event pcie_reg_event;
+	struct pci_saved_state *pcie_state;
+	struct dma_iommu_mapping *mapping;
+	async_cookie_t cookie;
+	void *boot_ipc_log;
+	void *tsync_ipc_log;
+	struct mhi_device *boot_dev;
+	struct notifier_block pm_notifier;
+	struct completion pm_completion;
+};
+
+/* ipc log markings */
+#define DLOG "Dev->Host: "
+#define HLOG "Host: "
+
+#define MHI_TSYNC_LOG_PAGES (10)
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define MHI_IPC_LOG_PAGES (100)
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_VERBOSE;
+
+#else
+
+#define MHI_IPC_LOG_PAGES (10)
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR;
+
+#endif
+
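+/*
+ * Track system suspend/resume transitions so the ESOC power off path can
+ * wait for PM_POST_SUSPEND before tearing down the link.
+ */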
+static int mhi_arch_pm_notifier(struct notifier_block *nb,
+				unsigned long event, void *unused)
+{
+	struct arch_info *arch_info =
+		container_of(nb, struct arch_info, pm_notifier);
+
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		reinit_completion(&arch_info->pm_completion);
+		break;
+
+	case PM_POST_SUSPEND:
+		complete_all(&arch_info->pm_completion);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static void mhi_arch_timesync_log(struct mhi_controller *mhi_cntrl,
+				  u64 remote_time)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+
+	if (remote_time != U64_MAX)
+		ipc_log_string(arch_info->tsync_ipc_log, "%6u.%06lu 0x%llx",
+			       REMOTE_TICKS_TO_SEC(remote_time),
+			       REMOTE_TIME_REMAINDER_US(remote_time),
+			       remote_time);
+}
+
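+/* vote for bus bandwidth; an index of 0 removes the vote */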
+static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+
+	MHI_LOG("Setting bus request to index %d\n", index);
+
+	if (arch_info->bus_client)
+		return msm_bus_scale_client_update_request(
+							arch_info->bus_client,
+							index);
+
+	/* no bus scale client registered, default to success */
+	return 0;
+}
+
+static void mhi_arch_pci_link_state_cb(struct msm_pcie_notify *notify)
+{
+	struct mhi_controller *mhi_cntrl = notify->data;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+
+	switch (notify->event) {
+	case MSM_PCIE_EVENT_WAKEUP:
+		MHI_LOG("Received MSM_PCIE_EVENT_WAKE signal\n");
+
+		/* bring link out of d3cold */
+		if (mhi_dev->powered_on) {
+			pm_runtime_get(&pci_dev->dev);
+			pm_runtime_put_noidle(&pci_dev->dev);
+		}
+		break;
+	case MSM_PCIE_EVENT_L1SS_TIMEOUT:
+		MHI_VERB("Received MSM_PCIE_EVENT_L1SS_TIMEOUT signal\n");
+
+		pm_runtime_mark_last_busy(&pci_dev->dev);
+		pm_request_autosuspend(&pci_dev->dev);
+		break;
+	default:
+		MHI_ERR("Unhandled event 0x%x\n", notify->event);
+	}
+}
+
+static int mhi_arch_esoc_ops_power_on(void *priv, unsigned int flags)
+{
+	struct mhi_controller *mhi_cntrl = priv;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+	int ret;
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+	if (mhi_dev->powered_on) {
+		MHI_LOG("MHI still in active state\n");
+		mutex_unlock(&mhi_cntrl->pm_mutex);
+		return 0;
+	}
+
+	MHI_LOG("Enter\n");
+
+	/* reset rpm state */
+	pm_runtime_set_active(&pci_dev->dev);
+	pm_runtime_enable(&pci_dev->dev);
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+	pm_runtime_forbid(&pci_dev->dev);
+	ret = pm_runtime_get_sync(&pci_dev->dev);
+	if (ret < 0) {
+		MHI_ERR("Error with rpm resume, ret:%d\n", ret);
+		return ret;
+	}
+
+	/* re-start the link & recover default cfg states */
+	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, pci_dev->bus->number,
+				  pci_dev, NULL, 0);
+	if (ret) {
+		MHI_ERR("Failed to resume pcie bus ret %d\n", ret);
+		return ret;
+	}
+
+	return mhi_pci_probe(pci_dev, NULL);
+}
+
+static void mhi_arch_link_off(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+
+	MHI_LOG("Entered\n");
+
+	pci_set_power_state(pci_dev, PCI_D3hot);
+
+	/* release the resources */
+	msm_pcie_pm_control(MSM_PCIE_SUSPEND, mhi_cntrl->bus, pci_dev, NULL, 0);
+	mhi_arch_set_bus_request(mhi_cntrl, 0);
+
+	MHI_LOG("Exited\n");
+}
+
+static void mhi_arch_esoc_ops_power_off(void *priv, unsigned int flags)
+{
+	struct mhi_controller *mhi_cntrl = priv;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	bool mdm_state = (flags & ESOC_HOOK_MDM_CRASH);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+
+	MHI_LOG("Enter: mdm_crashed:%d\n", mdm_state);
+
+	/*
+	 * Grab a wake source to abort system suspend if the system is
+	 * preparing to suspend. If the system is already suspended, wait
+	 * for the pm notifier callback to signal that resume has occurred
+	 * via the PM_POST_SUSPEND event.
+	 */
+	pm_stay_awake(&mhi_cntrl->mhi_dev->dev);
+	wait_for_completion(&arch_info->pm_completion);
+
+	/* if link is in drv suspend, wake it up */
+	pm_runtime_get_sync(&pci_dev->dev);
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+	if (!mhi_dev->powered_on) {
+		MHI_LOG("Not in active state\n");
+		mutex_unlock(&mhi_cntrl->pm_mutex);
+		pm_runtime_put_noidle(&pci_dev->dev);
+		return;
+	}
+	mhi_dev->powered_on = false;
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	pm_runtime_put_noidle(&pci_dev->dev);
+
+	MHI_LOG("Triggering shutdown process\n");
+	mhi_power_down(mhi_cntrl, !mdm_state);
+
+	/* turn the link off */
+	mhi_deinit_pci_dev(mhi_cntrl);
+	mhi_arch_link_off(mhi_cntrl);
+
+	/* wait for boot monitor to exit */
+	async_synchronize_cookie(arch_info->cookie + 1);
+
+	mhi_arch_iommu_deinit(mhi_cntrl);
+	mhi_arch_pcie_deinit(mhi_cntrl);
+
+	pm_relax(&mhi_cntrl->mhi_dev->dev);
+}
+
+static void mhi_arch_esoc_ops_mdm_error(void *priv)
+{
+	struct mhi_controller *mhi_cntrl = priv;
+
+	MHI_LOG("Enter: mdm asserted\n");
+
+	/* transition MHI state into error state */
+	mhi_control_error(mhi_cntrl);
+
+	MHI_LOG("Exit\n");
+}
+
+static void mhi_bl_dl_cb(struct mhi_device *mhi_device,
+			 struct mhi_result *mhi_result)
+{
+	struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+	char *buf = mhi_result->buf_addr;
+
+	/* force a null at last character */
+	buf[mhi_result->bytes_xferd - 1] = 0;
+
+	ipc_log_string(arch_info->boot_ipc_log, "%s %s", DLOG, buf);
+}
+
+static void mhi_bl_dummy_cb(struct mhi_device *mhi_dev,
+			    struct mhi_result *mhi_result)
+{
+}
+
+static void mhi_bl_remove(struct mhi_device *mhi_device)
+{
+	struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+
+	arch_info->boot_dev = NULL;
+	ipc_log_string(arch_info->boot_ipc_log,
+		       HLOG "Received Remove notif.\n");
+}
+
+static void mhi_boot_monitor(void *data, async_cookie_t cookie)
+{
+	struct mhi_controller *mhi_cntrl = data;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+	struct mhi_device *boot_dev;
+	/* 15 sec timeout for booting device */
+	const u32 timeout = msecs_to_jiffies(15000);
+
+	/* wait for device to enter boot stage */
+	wait_event_timeout(mhi_cntrl->state_event, mhi_cntrl->ee == MHI_EE_AMSS
+			   || mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION,
+			   timeout);
+
+	ipc_log_string(arch_info->boot_ipc_log, HLOG "Device current ee = %s\n",
+		       TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	/* if we successfully booted to amss, disable the boot log channel */
+	if (mhi_cntrl->ee == MHI_EE_AMSS) {
+		boot_dev = arch_info->boot_dev;
+		if (boot_dev)
+			mhi_unprepare_from_transfer(boot_dev);
+
+		pm_runtime_allow(&mhi_dev->pci_dev->dev);
+	}
+}
+
+int mhi_arch_power_up(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+
+	/* start a boot monitor */
+	arch_info->cookie = async_schedule(mhi_boot_monitor, mhi_cntrl);
+
+	return 0;
+}
+
+static int mhi_arch_pcie_scale_bw(struct mhi_controller *mhi_cntrl,
+				   struct pci_dev *pci_dev,
+				   struct mhi_link_info *link_info)
+{
+	int ret;
+
+	mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
+	ret = msm_pcie_set_link_bandwidth(pci_dev, link_info->target_link_speed,
+					  link_info->target_link_width);
+	mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data);
+
+	if (ret)
+		return ret;
+
+	/* do a bus scale vote based on gen speeds */
+	mhi_arch_set_bus_request(mhi_cntrl, link_info->target_link_speed);
+
+	MHI_VERB("bw changed to speed:0x%x width:0x%x\n",
+		 link_info->target_link_speed, link_info->target_link_width);
+
+	return 0;
+}
+
+static int mhi_arch_bw_scale(struct mhi_controller *mhi_cntrl,
+			     struct mhi_link_info *link_info)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+
+	return mhi_arch_pcie_scale_bw(mhi_cntrl, pci_dev, link_info);
+}
+
+static int mhi_bl_probe(struct mhi_device *mhi_device,
+			const struct mhi_device_id *id)
+{
+	char node_name[32];
+	struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+
+	snprintf(node_name, sizeof(node_name), "mhi_bl_%04x_%02u.%02u.%02u",
+		 mhi_device->dev_id, mhi_device->domain, mhi_device->bus,
+		 mhi_device->slot);
+
+	arch_info->boot_dev = mhi_device;
+	arch_info->boot_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES,
+							 node_name, 0);
+	ipc_log_string(arch_info->boot_ipc_log, HLOG
+		       "Entered SBL, Session ID:0x%x\n", mhi_cntrl->session_id);
+
+	return 0;
+}
+
+static const struct mhi_device_id mhi_bl_match_table[] = {
+	{ .chan = "BL" },
+	{},
+};
+
+static struct mhi_driver mhi_bl_driver = {
+	.id_table = mhi_bl_match_table,
+	.remove = mhi_bl_remove,
+	.probe = mhi_bl_probe,
+	.ul_xfer_cb = mhi_bl_dummy_cb,
+	.dl_xfer_cb = mhi_bl_dl_cb,
+	.driver = {
+		.name = "MHI_BL",
+		.owner = THIS_MODULE,
+	},
+};
+
+int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+	struct mhi_link_info *cur_link_info;
+	char node[32];
+	int ret;
+	u16 linkstat;
+
+	if (!arch_info) {
+		struct msm_pcie_register_event *reg_event;
+
+		arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev,
+					 sizeof(*arch_info), GFP_KERNEL);
+		if (!arch_info)
+			return -ENOMEM;
+
+		mhi_dev->arch_info = arch_info;
+		arch_info->mhi_dev = mhi_dev;
+
+		snprintf(node, sizeof(node), "mhi_%04x_%02u.%02u.%02u",
+			 mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
+			 mhi_cntrl->slot);
+		mhi_cntrl->log_buf = ipc_log_context_create(MHI_IPC_LOG_PAGES,
+							    node, 0);
+		mhi_cntrl->log_lvl = mhi_ipc_log_lvl;
+
+		snprintf(node, sizeof(node), "mhi_tsync_%04x_%02u.%02u.%02u",
+			 mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
+			 mhi_cntrl->slot);
+		arch_info->tsync_ipc_log = ipc_log_context_create(
+					   MHI_TSYNC_LOG_PAGES, node, 0);
+		if (arch_info->tsync_ipc_log)
+			mhi_cntrl->tsync_log = mhi_arch_timesync_log;
+
+		/* register for bus scale if defined */
+		arch_info->msm_bus_pdata = msm_bus_cl_get_pdata_from_dev(
+							&mhi_dev->pci_dev->dev);
+		if (arch_info->msm_bus_pdata) {
+			arch_info->bus_client =
+				msm_bus_scale_register_client(
+						arch_info->msm_bus_pdata);
+			if (!arch_info->bus_client)
+				return -EINVAL;
+		}
+
+		/* register with pcie rc for WAKE# events */
+		reg_event = &arch_info->pcie_reg_event;
+		reg_event->events =
+			MSM_PCIE_EVENT_WAKEUP | MSM_PCIE_EVENT_L1SS_TIMEOUT;
+
+		reg_event->user = mhi_dev->pci_dev;
+		reg_event->callback = mhi_arch_pci_link_state_cb;
+		reg_event->notify.data = mhi_cntrl;
+		ret = msm_pcie_register_event(reg_event);
+		if (ret)
+			MHI_LOG("Failed to reg. for link up notification\n");
+
+		init_completion(&arch_info->pm_completion);
+
+		/* register PM notifier to get post resume events */
+		arch_info->pm_notifier.notifier_call = mhi_arch_pm_notifier;
+		register_pm_notifier(&arch_info->pm_notifier);
+
+		/*
+		 * Mark as completed at initial boot-up to allow the ESOC
+		 * power on callback to proceed if the system has not gone
+		 * to suspend
+		 */
+		complete_all(&arch_info->pm_completion);
+
+		arch_info->esoc_client = devm_register_esoc_client(
+						&mhi_dev->pci_dev->dev, "mdm");
+		if (IS_ERR_OR_NULL(arch_info->esoc_client)) {
+			MHI_ERR("Failed to register esoc client\n");
+		} else {
+			/* register for power on/off hooks */
+			struct esoc_client_hook *esoc_ops =
+				&arch_info->esoc_ops;
+
+			esoc_ops->priv = mhi_cntrl;
+			esoc_ops->prio = ESOC_MHI_HOOK;
+			esoc_ops->esoc_link_power_on =
+				mhi_arch_esoc_ops_power_on;
+			esoc_ops->esoc_link_power_off =
+				mhi_arch_esoc_ops_power_off;
+			esoc_ops->esoc_link_mdm_crash =
+				mhi_arch_esoc_ops_mdm_error;
+
+			ret = esoc_register_client_hook(arch_info->esoc_client,
+							esoc_ops);
+			if (ret)
+				MHI_ERR("Failed to register esoc ops\n");
+		}
+
+		/*
+		 * The MHI host driver has full autonomy to manage the power
+		 * state. Disable all automatic power collapse features.
+		 */
+		msm_pcie_pm_control(MSM_PCIE_DISABLE_PC, mhi_cntrl->bus,
+				    mhi_dev->pci_dev, NULL, 0);
+		mhi_dev->pci_dev->no_d3hot = true;
+
+		mhi_cntrl->bw_scale = mhi_arch_bw_scale;
+
+		mhi_driver_register(&mhi_bl_driver);
+	}
+
+	/* store the current bw info */
+	ret = pcie_capability_read_word(mhi_dev->pci_dev,
+					PCI_EXP_LNKSTA, &linkstat);
+	if (ret)
+		return ret;
+
+	cur_link_info = &mhi_cntrl->mhi_link_info;
+	cur_link_info->target_link_speed = linkstat & PCI_EXP_LNKSTA_CLS;
+	cur_link_info->target_link_width = (linkstat & PCI_EXP_LNKSTA_NLW) >>
+					    PCI_EXP_LNKSTA_NLW_SHIFT;
+
+	return mhi_arch_set_bus_request(mhi_cntrl,
+					cur_link_info->target_link_speed);
+}
+
+void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
+{
+	mhi_arch_set_bus_request(mhi_cntrl, 0);
+}
+
+static struct dma_iommu_mapping *mhi_arch_create_iommu_mapping(
+					struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	dma_addr_t base;
+	size_t size;
+
+	/*
+	 * If S1_BYPASS is enabled, the iommu address space is not used;
+	 * however, the framework still requires clients to create a mapping
+	 * before attaching, so use the smallest size the iommu framework
+	 * requires.
+	 */
+	if (mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS) {
+		base = 0;
+		size = PAGE_SIZE;
+	} else {
+		base = mhi_dev->iova_start;
+		size = (mhi_dev->iova_stop - base) + 1;
+	}
+
+	MHI_LOG("Create iommu mapping of base:%pad size:%zu\n",
+		&base, size);
+	return arm_iommu_create_mapping(&pci_bus_type, base, size);
+}
+
+int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+	u32 smmu_config = mhi_dev->smmu_cfg;
+	struct dma_iommu_mapping *mapping = NULL;
+	int ret;
+
+	if (smmu_config) {
+		mapping = mhi_arch_create_iommu_mapping(mhi_cntrl);
+		if (IS_ERR(mapping)) {
+			MHI_ERR("Failed to create iommu mapping\n");
+			return PTR_ERR(mapping);
+		}
+	}
+
+	if (smmu_config & MHI_SMMU_S1_BYPASS) {
+		int s1_bypass = 1;
+
+		ret = iommu_domain_set_attr(mapping->domain,
+					    DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+		if (ret) {
+			MHI_ERR("Failed to set attribute S1_BYPASS\n");
+			goto release_mapping;
+		}
+	}
+
+	if (smmu_config & MHI_SMMU_FAST) {
+		int fast_map = 1;
+
+		ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST,
+					    &fast_map);
+		if (ret) {
+			MHI_ERR("Failed to set attribute FAST_MAP\n");
+			goto release_mapping;
+		}
+	}
+
+	if (smmu_config & MHI_SMMU_ATOMIC) {
+		int atomic = 1;
+
+		ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_ATOMIC,
+					    &atomic);
+		if (ret) {
+			MHI_ERR("Failed to set attribute ATOMIC\n");
+			goto release_mapping;
+		}
+	}
+
+	if (smmu_config & MHI_SMMU_FORCE_COHERENT) {
+		int force_coherent = 1;
+
+		ret = iommu_domain_set_attr(mapping->domain,
+					DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+					&force_coherent);
+		if (ret) {
+			MHI_ERR("Failed to set attribute FORCE_COHERENT\n");
+			goto release_mapping;
+		}
+	}
+
+	if (smmu_config) {
+		ret = arm_iommu_attach_device(&mhi_dev->pci_dev->dev, mapping);
+
+		if (ret) {
+			MHI_ERR("Error attach device, ret:%d\n", ret);
+			goto release_mapping;
+		}
+		arch_info->mapping = mapping;
+	}
+
+	mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
+
+	ret = dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
+	if (ret) {
+		MHI_ERR("Error setting dma mask, ret:%d\n", ret);
+		goto release_device;
+	}
+
+	return 0;
+
+release_device:
+	arm_iommu_detach_device(mhi_cntrl->dev);
+
+release_mapping:
+	arm_iommu_release_mapping(mapping);
+
+	return ret;
+}
+
+void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+	struct dma_iommu_mapping *mapping = arch_info->mapping;
+
+	if (mapping) {
+		arm_iommu_detach_device(mhi_cntrl->dev);
+		arm_iommu_release_mapping(mapping);
+	}
+	arch_info->mapping = NULL;
+	mhi_cntrl->dev = NULL;
+}
+
+int mhi_arch_link_suspend(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+	int ret = 0;
+
+	MHI_LOG("Entered\n");
+
+	/* disable inactivity timer */
+	msm_pcie_l1ss_timeout_disable(pci_dev);
+
+	switch (mhi_dev->suspend_mode) {
+	case MHI_DEFAULT_SUSPEND:
+		pci_clear_master(pci_dev);
+		ret = pci_save_state(mhi_dev->pci_dev);
+		if (ret) {
+			MHI_ERR("Failed with pci_save_state, ret:%d\n", ret);
+			goto exit_suspend;
+		}
+
+		arch_info->pcie_state = pci_store_saved_state(pci_dev);
+		pci_disable_device(pci_dev);
+
+		pci_set_power_state(pci_dev, PCI_D3hot);
+
+		/* release the resources */
+		msm_pcie_pm_control(MSM_PCIE_SUSPEND, mhi_cntrl->bus, pci_dev,
+				    NULL, 0);
+		mhi_arch_set_bus_request(mhi_cntrl, 0);
+		break;
+	case MHI_FAST_LINK_OFF:
+	case MHI_ACTIVE_STATE:
+	case MHI_FAST_LINK_ON:/* keeping link on do nothing */
+		break;
+	}
+
+exit_suspend:
+	if (ret)
+		msm_pcie_l1ss_timeout_enable(pci_dev);
+
+	MHI_LOG("Exited with ret:%d\n", ret);
+
+	return ret;
+}
+
+static int __mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct arch_info *arch_info = mhi_dev->arch_info;
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+	struct mhi_link_info *cur_info = &mhi_cntrl->mhi_link_info;
+	int ret;
+
+	MHI_LOG("Entered\n");
+
+	/* request bus scale voting based on the target link (gen) speed */
+	ret = mhi_arch_set_bus_request(mhi_cntrl,
+				       cur_info->target_link_speed);
+	if (ret)
+		MHI_LOG("Could not set bus frequency, ret:%d\n", ret);
+
+	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, mhi_cntrl->bus, pci_dev,
+				  NULL, 0);
+	if (ret) {
+		MHI_ERR("Link training failed, ret:%d\n", ret);
+		return ret;
+	}
+
+	ret = pci_set_power_state(pci_dev, PCI_D0);
+	if (ret) {
+		MHI_ERR("Failed to set PCI_D0 state, ret:%d\n", ret);
+		return ret;
+	}
+
+	ret = pci_enable_device(pci_dev);
+	if (ret) {
+		MHI_ERR("Failed to enable device, ret:%d\n", ret);
+		return ret;
+	}
+
+	ret = pci_load_and_free_saved_state(pci_dev, &arch_info->pcie_state);
+	if (ret)
+		MHI_LOG("Failed to load saved cfg state\n");
+
+	pci_restore_state(pci_dev);
+	pci_set_master(pci_dev);
+
+	return 0;
+}
+
+int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+	int ret = 0;
+
+	MHI_LOG("Entered\n");
+
+	switch (mhi_dev->suspend_mode) {
+	case MHI_DEFAULT_SUSPEND:
+		ret = __mhi_arch_link_resume(mhi_cntrl);
+		break;
+	case MHI_FAST_LINK_OFF:
+	case MHI_ACTIVE_STATE:
+	case MHI_FAST_LINK_ON:
+		break;
+	}
+
+	if (ret) {
+		MHI_ERR("Link training failed, ret:%d\n", ret);
+		return ret;
+	}
+
+	msm_pcie_l1ss_timeout_enable(pci_dev);
+
+	MHI_LOG("Exited\n");
+
+	return 0;
+}
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
new file mode 100644
index 0000000..3df293c
--- /dev/null
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -0,0 +1,875 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/arch_timer.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/mhi.h>
+#include "mhi_qcom.h"
+
+struct firmware_info {
+	unsigned int dev_id;
+	const char *fw_image;
+	const char *edl_image;
+};
+
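+/*
+ * matched against the PCI device id at probe time; the last entry (with
+ * no dev_id) is the default used when nothing matches
+ */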
+static const struct firmware_info firmware_table[] = {
+	{.dev_id = 0x306, .fw_image = "sdx55m/sbl1.mbn"},
+	{.dev_id = 0x305, .fw_image = "sdx50m/sbl1.mbn"},
+	{.dev_id = 0x304, .fw_image = "sbl.mbn", .edl_image = "edl.mbn"},
+	/* default, set to debug.mbn */
+	{.fw_image = "debug.mbn"},
+};
+
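+/*
+ * when set, probe skips the automatic power up sequence (use the power_up
+ * sysfs node instead) and the default debug firmware image is selected
+ */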
+static int debug_mode;
+module_param_named(debug_mode, debug_mode, int, 0644);
+
+int mhi_debugfs_trigger_m0(void *data, u64 val)
+{
+	struct mhi_controller *mhi_cntrl = data;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+	MHI_LOG("Trigger M3 Exit\n");
+	pm_runtime_get(&mhi_dev->pci_dev->dev);
+	pm_runtime_put(&mhi_dev->pci_dev->dev);
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL,
+			mhi_debugfs_trigger_m0, "%llu\n");
+
+int mhi_debugfs_trigger_m3(void *data, u64 val)
+{
+	struct mhi_controller *mhi_cntrl = data;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+	MHI_LOG("Trigger M3 Entry\n");
+	pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev);
+	pm_request_autosuspend(&mhi_dev->pci_dev->dev);
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL,
+			mhi_debugfs_trigger_m3, "%llu\n");
+
+void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+
+	pm_runtime_mark_last_busy(&pci_dev->dev);
+	pm_runtime_dont_use_autosuspend(&pci_dev->dev);
+	pm_runtime_disable(&pci_dev->dev);
+
+	/* reset counter for lpm state changes */
+	mhi_dev->lpm_disable_depth = 0;
+
+	pci_free_irq_vectors(pci_dev);
+	kfree(mhi_cntrl->irq);
+	mhi_cntrl->irq = NULL;
+	iounmap(mhi_cntrl->regs);
+	mhi_cntrl->regs = NULL;
+	pci_clear_master(pci_dev);
+	pci_release_region(pci_dev, mhi_dev->resn);
+	pci_disable_device(pci_dev);
+}
+
+static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+	int ret;
+	resource_size_t len;
+	int i;
+
+	mhi_dev->resn = MHI_PCI_BAR_NUM;
+	ret = pci_assign_resource(pci_dev, mhi_dev->resn);
+	if (ret) {
+		MHI_ERR("Error assign pci resources, ret:%d\n", ret);
+		return ret;
+	}
+
+	ret = pci_enable_device(pci_dev);
+	if (ret) {
+		MHI_ERR("Error enabling device, ret:%d\n", ret);
+		goto error_enable_device;
+	}
+
+	ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi");
+	if (ret) {
+		MHI_ERR("Error pci_request_region, ret:%d\n", ret);
+		goto error_request_region;
+	}
+
+	pci_set_master(pci_dev);
+
+	mhi_cntrl->base_addr = pci_resource_start(pci_dev, mhi_dev->resn);
+	len = pci_resource_len(pci_dev, mhi_dev->resn);
+	mhi_cntrl->regs = ioremap_nocache(mhi_cntrl->base_addr, len);
+	if (!mhi_cntrl->regs) {
+		MHI_ERR("Error ioremap region\n");
+		ret = -EIO;
+		goto error_ioremap;
+	}
+
+	ret = pci_alloc_irq_vectors(pci_dev, mhi_cntrl->msi_required,
+				    mhi_cntrl->msi_required, PCI_IRQ_MSI);
+	if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) {
+		MHI_ERR("Failed to enable MSI, ret:%d\n", ret);
+		goto error_req_msi;
+	}
+
+	mhi_cntrl->msi_allocated = ret;
+	mhi_cntrl->irq = kmalloc_array(mhi_cntrl->msi_allocated,
+				       sizeof(*mhi_cntrl->irq), GFP_KERNEL);
+	if (!mhi_cntrl->irq) {
+		ret = -ENOMEM;
+		goto error_alloc_msi_vec;
+	}
+
+	for (i = 0; i < mhi_cntrl->msi_allocated; i++) {
+		mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i);
+		if (mhi_cntrl->irq[i] < 0) {
+			ret = mhi_cntrl->irq[i];
+			goto error_get_irq_vec;
+		}
+	}
+
+	dev_set_drvdata(&pci_dev->dev, mhi_cntrl);
+
+	/* configure runtime pm */
+	pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_RPM_SUSPEND_TMR_MS);
+	pm_runtime_use_autosuspend(&pci_dev->dev);
+	pm_suspend_ignore_children(&pci_dev->dev, true);
+
+	/*
+	 * The pci framework increments the usage count twice before
+	 * calling the device driver probe function:
+	 * 1st, pci.c pci_pm_init() calls pm_runtime_forbid;
+	 * 2nd, pci-driver.c local_pci_probe() calls pm_runtime_get_sync.
+	 * The framework expects the pci device driver to call
+	 * pm_runtime_put_noidle to decrement the usage count after a
+	 * successful probe and to call pm_runtime_allow to enable
+	 * runtime suspend.
+	 */
+	pm_runtime_mark_last_busy(&pci_dev->dev);
+	pm_runtime_put_noidle(&pci_dev->dev);
+
+	return 0;
+
+error_get_irq_vec:
+	kfree(mhi_cntrl->irq);
+	mhi_cntrl->irq = NULL;
+
+error_alloc_msi_vec:
+	pci_free_irq_vectors(pci_dev);
+
+error_req_msi:
+	iounmap(mhi_cntrl->regs);
+
+error_ioremap:
+	pci_clear_master(pci_dev);
+	pci_release_region(pci_dev, mhi_dev->resn);
+
+error_request_region:
+	pci_disable_device(pci_dev);
+
+error_enable_device:
+	return ret;
+}
+
+static int mhi_runtime_suspend(struct device *dev)
+{
+	int ret = 0;
+	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+	MHI_LOG("Enter\n");
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	if (!mhi_dev->powered_on) {
+		MHI_LOG("Not fully powered, return success\n");
+		mutex_unlock(&mhi_cntrl->pm_mutex);
+		return 0;
+	}
+
+	ret = mhi_pm_suspend(mhi_cntrl);
+
+	if (ret) {
+		MHI_LOG("Abort due to ret:%d\n", ret);
+		mhi_dev->suspend_mode = MHI_ACTIVE_STATE;
+		goto exit_runtime_suspend;
+	}
+
+	mhi_dev->suspend_mode = MHI_DEFAULT_SUSPEND;
+
+	ret = mhi_arch_link_suspend(mhi_cntrl);
+
+	/* failed to suspend the link; abort mhi suspend */
+	if (ret) {
+		MHI_LOG("Failed to suspend link, abort suspend\n");
+		mhi_pm_resume(mhi_cntrl);
+		mhi_dev->suspend_mode = MHI_ACTIVE_STATE;
+	}
+
+exit_runtime_suspend:
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+	MHI_LOG("Exited with ret:%d\n", ret);
+
+	return ret;
+}
+
+static int mhi_runtime_idle(struct device *dev)
+{
+	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+
+	MHI_LOG("Entered returning -EBUSY\n");
+
+	/*
+	 * During runtime resume, the RPM framework calls the rpm_idle
+	 * callback whenever the dev.power usage_count drops to 0 to check
+	 * if the device is ready to suspend. If the callback returns 0,
+	 * or is not defined, the framework assumes the device driver is
+	 * ready to suspend and schedules a runtime suspend. In MHI power
+	 * management, the MHI host shall go to runtime suspend only after
+	 * entering MHI state M2, even if the usage count is 0. Return
+	 * -EBUSY to disable automatic suspend.
+	 */
+	return -EBUSY;
+}
+
+static int mhi_runtime_resume(struct device *dev)
+{
+	int ret = 0;
+	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+	MHI_LOG("Enter\n");
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	if (!mhi_dev->powered_on) {
+		MHI_LOG("Not fully powered, return success\n");
+		mutex_unlock(&mhi_cntrl->pm_mutex);
+		return 0;
+	}
+
+	/* turn on link */
+	ret = mhi_arch_link_resume(mhi_cntrl);
+	if (ret)
+		goto rpm_resume_exit;
+
+	/* transition to M0 state */
+	if (mhi_dev->suspend_mode == MHI_DEFAULT_SUSPEND)
+		ret = mhi_pm_resume(mhi_cntrl);
+	else
+		ret = mhi_pm_fast_resume(mhi_cntrl, MHI_FAST_LINK_ON);
+
+	mhi_dev->suspend_mode = MHI_ACTIVE_STATE;
+
+rpm_resume_exit:
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+	MHI_LOG("Exited with :%d\n", ret);
+
+	return ret;
+}
+
+static int mhi_system_resume(struct device *dev)
+{
+	return mhi_runtime_resume(dev);
+}
+
+int mhi_system_suspend(struct device *dev)
+{
+	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	int ret;
+
+	MHI_LOG("Entered\n");
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	if (!mhi_dev->powered_on) {
+		MHI_LOG("Not fully powered, return success\n");
+		mutex_unlock(&mhi_cntrl->pm_mutex);
+		return 0;
+	}
+
+	/*
+	 * The pci framework always makes a dummy rpm vote to resume the
+	 * device before calling system suspend, hence the usage count is
+	 * at least one.
+	 */
+	if (atomic_read(&dev->power.usage_count) > 1) {
+		/*
+		 * clients have requested to keep the link on, so try
+		 * fast suspend. No need to notify clients since we will
+		 * not be turning off the pcie link.
+		 */
+		ret = mhi_pm_fast_suspend(mhi_cntrl, false);
+		mhi_dev->suspend_mode = MHI_FAST_LINK_ON;
+	} else {
+		/* try normal suspend */
+		mhi_dev->suspend_mode = MHI_DEFAULT_SUSPEND;
+		ret = mhi_pm_suspend(mhi_cntrl);
+
+		/*
+		 * normal suspend failed because we're busy, so try
+		 * fast suspend before aborting system suspend. This can
+		 * happen if a client has disabled device lpm but there
+		 * is no active PCIe vote from the apps processor.
+		 */
+		if (ret == -EBUSY) {
+			ret = mhi_pm_fast_suspend(mhi_cntrl, true);
+			mhi_dev->suspend_mode = MHI_FAST_LINK_ON;
+		}
+	}
+
+	if (ret) {
+		MHI_LOG("Abort due to ret:%d\n", ret);
+		mhi_dev->suspend_mode = MHI_ACTIVE_STATE;
+		goto exit_system_suspend;
+	}
+
+	ret = mhi_arch_link_suspend(mhi_cntrl);
+
+	/* failed to suspend the link; abort mhi suspend */
+	if (ret) {
+		MHI_LOG("Failed to suspend link, abort suspend\n");
+		if (mhi_dev->suspend_mode == MHI_DEFAULT_SUSPEND)
+			mhi_pm_resume(mhi_cntrl);
+		else
+			mhi_pm_fast_resume(mhi_cntrl, MHI_FAST_LINK_OFF);
+
+		mhi_dev->suspend_mode = MHI_ACTIVE_STATE;
+	}
+
+exit_system_suspend:
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	MHI_LOG("Exit with ret:%d\n", ret);
+
+	return ret;
+}
+
+static int mhi_force_suspend(struct mhi_controller *mhi_cntrl)
+{
+	int ret = -EIO;
+	const u32 delayms = 100;
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	int itr = DIV_ROUND_UP(mhi_cntrl->timeout_ms, delayms);
+
+	MHI_LOG("Entered\n");
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	for (; itr; itr--) {
+		/*
+		 * This function gets called as soon as the device enters
+		 * mission mode, so most of the channels are still disabled.
+		 * However, sbl channels are active and clients could be
+		 * trying to close channels while we are trying to suspend
+		 * the link, so retry if MHI is busy.
+		 */
+		ret = mhi_pm_suspend(mhi_cntrl);
+		if (!ret || ret != -EBUSY)
+			break;
+
+		MHI_LOG("MHI busy, sleeping and retry\n");
+		msleep(delayms);
+	}
+
+	if (ret)
+		goto exit_force_suspend;
+
+	mhi_dev->suspend_mode = MHI_DEFAULT_SUSPEND;
+	ret = mhi_arch_link_suspend(mhi_cntrl);
+
+exit_force_suspend:
+	MHI_LOG("Force suspend ret with %d\n", ret);
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return ret;
+}
+
+/* checks if link is down */
+static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv)
+{
+	struct mhi_dev *mhi_dev = priv;
+	u16 dev_id;
+	int ret;
+
+	/* try reading the device id; if it doesn't match, the link is down */
+	ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id);
+
+	return (ret || dev_id != mhi_cntrl->dev_id) ? -EIO : 0;
+}
+
+/* disable PCIe L1 */
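+/* nesting-safe: only the 0 -> 1 depth transition actually clears ASPM L1 */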
+static int mhi_lpm_disable(struct mhi_controller *mhi_cntrl, void *priv)
+{
+	struct mhi_dev *mhi_dev = priv;
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+	int lnkctl = pci_dev->pcie_cap + PCI_EXP_LNKCTL;
+	u8 val;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&mhi_dev->lpm_lock, flags);
+
+	/* L1 is already disabled */
+	if (mhi_dev->lpm_disable_depth) {
+		mhi_dev->lpm_disable_depth++;
+		goto lpm_disable_exit;
+	}
+
+	ret = pci_read_config_byte(pci_dev, lnkctl, &val);
+	if (ret) {
+		MHI_ERR("Error reading LNKCTL, ret:%d\n", ret);
+		goto lpm_disable_exit;
+	}
+
+	/* L1 is not supported, do not increment lpm_disable_depth */
+	if (unlikely(!(val & PCI_EXP_LNKCTL_ASPM_L1)))
+		goto lpm_disable_exit;
+
+	val &= ~PCI_EXP_LNKCTL_ASPM_L1;
+	ret = pci_write_config_byte(pci_dev, lnkctl, val);
+	if (ret) {
+		MHI_ERR("Error writing LNKCTL to disable LPM, ret:%d\n", ret);
+		goto lpm_disable_exit;
+	}
+
+	mhi_dev->lpm_disable_depth++;
+
+lpm_disable_exit:
+	spin_unlock_irqrestore(&mhi_dev->lpm_lock, flags);
+
+	return ret;
+}
+
+/* enable PCIe L1 */
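+/* L1 is re-enabled only when the last outstanding disable is dropped */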
+static int mhi_lpm_enable(struct mhi_controller *mhi_cntrl, void *priv)
+{
+	struct mhi_dev *mhi_dev = priv;
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+	int lnkctl = pci_dev->pcie_cap + PCI_EXP_LNKCTL;
+	u8 val;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&mhi_dev->lpm_lock, flags);
+
+	/*
+	 * Exit if L1 is not supported or was never disabled, or if
+	 * decrementing lpm_disable_depth still keeps it above 0
+	 */
+	if (!mhi_dev->lpm_disable_depth)
+		goto lpm_enable_exit;
+
+	if (mhi_dev->lpm_disable_depth > 1) {
+		mhi_dev->lpm_disable_depth--;
+		goto lpm_enable_exit;
+	}
+
+	ret = pci_read_config_byte(pci_dev, lnkctl, &val);
+	if (ret) {
+		MHI_ERR("Error reading LNKCTL, ret:%d\n", ret);
+		goto lpm_enable_exit;
+	}
+
+	val |= PCI_EXP_LNKCTL_ASPM_L1;
+	ret = pci_write_config_byte(pci_dev, lnkctl, val);
+	if (ret) {
+		MHI_ERR("Error writing LNKCTL to enable LPM, ret:%d\n", ret);
+		goto lpm_enable_exit;
+	}
+
+	mhi_dev->lpm_disable_depth = 0;
+
+lpm_enable_exit:
+	spin_unlock_irqrestore(&mhi_dev->lpm_lock, flags);
+
+	return ret;
+}
+
+static int mhi_qcom_power_up(struct mhi_controller *mhi_cntrl)
+{
+	enum mhi_dev_state dev_state = mhi_get_mhi_state(mhi_cntrl);
+	const u32 delayus = 10;
+	int itr = DIV_ROUND_UP(mhi_cntrl->timeout_ms * 1000, delayus);
+	int ret;
+
+	/*
+	 * It's possible the device did not go through a cold reset before
+	 * power up and is still in an error state. If so, we need to
+	 * trigger a soft reset before continuing with power up.
+	 */
+	if (dev_state == MHI_STATE_SYS_ERR) {
+		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+		while (itr--) {
+			dev_state = mhi_get_mhi_state(mhi_cntrl);
+			if (dev_state != MHI_STATE_SYS_ERR)
+				break;
+			usleep_range(delayus, delayus << 1);
+		}
+		/* device still in error state, abort power up */
+		if (dev_state == MHI_STATE_SYS_ERR)
+			return -EIO;
+	}
+
+	/* when coming out of SSR, initial ee state is not valid */
+	mhi_cntrl->ee = 0;
+
+	ret = mhi_arch_power_up(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	ret = mhi_async_power_up(mhi_cntrl);
+
+	/* if power up created the debugfs dentry, add the trigger files */
+	if (mhi_cntrl->dentry) {
+		debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl,
+				    &debugfs_trigger_m0_fops);
+		debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl,
+				    &debugfs_trigger_m3_fops);
+	}
+
+	return ret;
+}
+
+static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv)
+{
+	struct mhi_dev *mhi_dev = priv;
+	struct device *dev = &mhi_dev->pci_dev->dev;
+
+	return pm_runtime_get(dev);
+}
+
+static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv)
+{
+	struct mhi_dev *mhi_dev = priv;
+	struct device *dev = &mhi_dev->pci_dev->dev;
+
+	pm_runtime_put_noidle(dev);
+}
+
+static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
+			  void *priv,
+			  enum MHI_CB reason)
+{
+	struct mhi_dev *mhi_dev = priv;
+	struct device *dev = &mhi_dev->pci_dev->dev;
+	int ret;
+
+	switch (reason) {
+	case MHI_CB_IDLE:
+		MHI_LOG("Schedule runtime suspend\n");
+		pm_runtime_mark_last_busy(dev);
+		pm_request_autosuspend(dev);
+		break;
+	case MHI_CB_EE_MISSION_MODE:
+		/*
+		 * we need to force a suspend so device can switch to
+		 * mission mode pcie phy settings.
+		 */
+		pm_runtime_get(dev);
+		ret = mhi_force_suspend(mhi_cntrl);
+		if (!ret)
+			mhi_runtime_resume(dev);
+		pm_runtime_put(dev);
+		break;
+	default:
+		MHI_ERR("Unhandled cb:0x%x\n", reason);
+	}
+}
+
+/* capture host SoC XO time in ticks */
+static u64 mhi_time_get(struct mhi_controller *mhi_cntrl, void *priv)
+{
+	return arch_counter_get_cntvct();
+}
+
+static ssize_t timeout_ms_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	/* buffer provided by sysfs has a minimum size of PAGE_SIZE */
+	return snprintf(buf, PAGE_SIZE, "%u\n", mhi_cntrl->timeout_ms);
+}
+
+static ssize_t timeout_ms_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf,
+				size_t count)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	u32 timeout_ms;
+
+	if (kstrtou32(buf, 0, &timeout_ms) < 0)
+		return -EINVAL;
+
+	mhi_cntrl->timeout_ms = timeout_ms;
+
+	return count;
+}
+static DEVICE_ATTR_RW(timeout_ms);
+
+static ssize_t power_up_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf,
+			      size_t count)
+{
+	int ret;
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	ret = mhi_qcom_power_up(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	return count;
+}
+static DEVICE_ATTR_WO(power_up);
+
+static struct attribute *mhi_qcom_attrs[] = {
+	&dev_attr_timeout_ms.attr,
+	&dev_attr_power_up.attr,
+	NULL
+};
+
+static const struct attribute_group mhi_qcom_group = {
+	.attrs = mhi_qcom_attrs,
+};
+
+static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
+{
+	struct mhi_controller *mhi_cntrl;
+	struct mhi_dev *mhi_dev;
+	struct device_node *of_node = pci_dev->dev.of_node;
+	const struct firmware_info *firmware_info;
+	bool use_bb;
+	u64 addr_win[2];
+	int ret, i;
+
+	if (!of_node)
+		return ERR_PTR(-ENODEV);
+
+	mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev));
+	if (!mhi_cntrl)
+		return ERR_PTR(-ENOMEM);
+
+	mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+	mhi_cntrl->domain = pci_domain_nr(pci_dev->bus);
+	mhi_cntrl->dev_id = pci_dev->device;
+	mhi_cntrl->bus = pci_dev->bus->number;
+	mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn);
+
+	ret = of_property_read_u32(of_node, "qcom,smmu-cfg",
+				   &mhi_dev->smmu_cfg);
+	if (ret)
+		goto error_register;
+
+	use_bb = of_property_read_bool(of_node, "mhi,use-bb");
+
+	/*
+	 * if s1 translation is enabled or a bounce buffer is in use, pull
+	 * the iova address range from dt
+	 */
+	if (use_bb || (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH &&
+		       !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS))) {
+		ret = of_property_count_elems_of_size(of_node, "qcom,addr-win",
+						      sizeof(addr_win));
+		if (ret != 1)
+			goto error_register;
+		ret = of_property_read_u64_array(of_node, "qcom,addr-win",
+						 addr_win, 2);
+		if (ret)
+			goto error_register;
+	} else {
+		addr_win[0] = memblock_start_of_DRAM();
+		addr_win[1] = memblock_end_of_DRAM();
+	}
+
+	mhi_dev->iova_start = addr_win[0];
+	mhi_dev->iova_stop = addr_win[1];
+
+	/*
+	 * If S1 is enabled, set the MHI_CTRL start address to 0 so we can
+	 * use the low level mapping api to map buffers outside the smmu
+	 * domain
+	 */
+	if (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH &&
+	    !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS))
+		mhi_cntrl->iova_start = 0;
+	else
+		mhi_cntrl->iova_start = addr_win[0];
+
+	mhi_cntrl->iova_stop = mhi_dev->iova_stop;
+	mhi_cntrl->of_node = of_node;
+
+	mhi_dev->pci_dev = pci_dev;
+	spin_lock_init(&mhi_dev->lpm_lock);
+
+	/* setup power management apis */
+	mhi_cntrl->status_cb = mhi_status_cb;
+	mhi_cntrl->runtime_get = mhi_runtime_get;
+	mhi_cntrl->runtime_put = mhi_runtime_put;
+	mhi_cntrl->link_status = mhi_link_status;
+
+	mhi_cntrl->lpm_disable = mhi_lpm_disable;
+	mhi_cntrl->lpm_enable = mhi_lpm_enable;
+	mhi_cntrl->time_get = mhi_time_get;
+	mhi_cntrl->remote_timer_freq = 19200000;
+	mhi_cntrl->local_timer_freq = 19200000;
+
+	ret = of_register_mhi_controller(mhi_cntrl);
+	if (ret)
+		goto error_register;
+
+	for (i = 0; i < ARRAY_SIZE(firmware_table); i++) {
+		firmware_info = firmware_table + i;
+
+		/* debug mode always uses the default image */
+		if (!debug_mode && mhi_cntrl->dev_id == firmware_info->dev_id)
+			break;
+	}
+
+	mhi_cntrl->fw_image = firmware_info->fw_image;
+	mhi_cntrl->edl_image = firmware_info->edl_image;
+
+	ret = sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj,
+				 &mhi_qcom_group);
+	if (ret)
+		goto error_register;
+
+	return mhi_cntrl;
+
+error_register:
+	mhi_free_controller(mhi_cntrl);
+
+	return ERR_PTR(-EINVAL);
+}
+
+int mhi_pci_probe(struct pci_dev *pci_dev,
+		  const struct pci_device_id *device_id)
+{
+	struct mhi_controller *mhi_cntrl;
+	u32 domain = pci_domain_nr(pci_dev->bus);
+	u32 bus = pci_dev->bus->number;
+	u32 dev_id = pci_dev->device;
+	u32 slot = PCI_SLOT(pci_dev->devfn);
+	struct mhi_dev *mhi_dev;
+	int ret;
+
+	/* see if we already registered */
+	mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id);
+	if (!mhi_cntrl)
+		mhi_cntrl = mhi_register_controller(pci_dev);
+
+	if (IS_ERR(mhi_cntrl))
+		return PTR_ERR(mhi_cntrl);
+
+	mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+	mhi_dev->powered_on = true;
+
+	ret = mhi_arch_pcie_init(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	ret = mhi_arch_iommu_init(mhi_cntrl);
+	if (ret)
+		goto error_iommu_init;
+
+	ret = mhi_init_pci_dev(mhi_cntrl);
+	if (ret)
+		goto error_init_pci;
+
+	/* start power up sequence */
+	if (!debug_mode) {
+		ret = mhi_qcom_power_up(mhi_cntrl);
+		if (ret)
+			goto error_power_up;
+	}
+
+	pm_runtime_mark_last_busy(&pci_dev->dev);
+
+	MHI_LOG("Return successful\n");
+
+	return 0;
+
+error_power_up:
+	mhi_deinit_pci_dev(mhi_cntrl);
+
+error_init_pci:
+	mhi_arch_iommu_deinit(mhi_cntrl);
+
+error_iommu_init:
+	mhi_arch_pcie_deinit(mhi_cntrl);
+
+	return ret;
+}
+
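+/* system sleep reuses the runtime resume path to bring the link back up */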
+static const struct dev_pm_ops pm_ops = {
+	SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
+			   mhi_runtime_resume,
+			   mhi_runtime_idle)
+	SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume)
+};
+
+static struct pci_device_id mhi_pcie_device_id[] = {
+	{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0300)},
+	{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0301)},
+	{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0302)},
+	{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)},
+	{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)},
+	{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)},
+	{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)},
+	{PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)},
+	{0},
+};
+
+static struct pci_driver mhi_pcie_driver = {
+	.name = "mhi",
+	.id_table = mhi_pcie_device_id,
+	.probe = mhi_pci_probe,
+	.driver = {
+		.pm = &pm_ops
+	}
+};
+
+module_pci_driver(mhi_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("MHI_CORE");
+MODULE_DESCRIPTION("MHI Host Driver");
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h b/drivers/bus/mhi/controllers/mhi_qcom.h
new file mode 100644
index 0000000..9060802
--- /dev/null
+++ b/drivers/bus/mhi/controllers/mhi_qcom.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MHI_QCOM_
+#define _MHI_QCOM_
+
+/* iova cfg bitmask */
+#define MHI_SMMU_ATTACH BIT(0)
+#define MHI_SMMU_S1_BYPASS BIT(1)
+#define MHI_SMMU_FAST BIT(2)
+#define MHI_SMMU_ATOMIC BIT(3)
+#define MHI_SMMU_FORCE_COHERENT BIT(4)
+
+#define MHI_PCIE_VENDOR_ID (0x17cb)
+#define MHI_PCIE_DEBUG_ID (0xffff)
+
+/* runtime suspend timer */
+#define MHI_RPM_SUSPEND_TMR_MS (250)
+#define MHI_PCI_BAR_NUM (0)
+
+/* timesync time calculations */
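+/* remote_timer_freq is set to 19.2MHz (the Qualcomm XO rate) at probe */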
+#define REMOTE_TICKS_TO_US(x) (div_u64((x) * 100ULL, \
+			       div_u64(mhi_cntrl->remote_timer_freq, 10000ULL)))
+#define REMOTE_TICKS_TO_SEC(x) (div_u64((x), \
+				mhi_cntrl->remote_timer_freq))
+#define REMOTE_TIME_REMAINDER_US(x) (REMOTE_TICKS_TO_US((x)) % \
+					(REMOTE_TICKS_TO_SEC((x)) * 1000000ULL))
+
+extern const char * const mhi_ee_str[MHI_EE_MAX];
+#define TO_MHI_EXEC_STR(ee) ((ee) >= MHI_EE_MAX ? "INVALID_EE" : mhi_ee_str[ee])
+
+enum mhi_suspend_mode {
+	MHI_ACTIVE_STATE,
+	MHI_DEFAULT_SUSPEND,
+	MHI_FAST_LINK_OFF,
+	MHI_FAST_LINK_ON,
+};
+
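+/* any mode other than MHI_ACTIVE_STATE (0) counts as suspended */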
+#define MHI_IS_SUSPENDED(mode) (mode)
+
+struct mhi_dev {
+	struct pci_dev *pci_dev;
+	bool drv_supported;
+	u32 smmu_cfg;
+	int resn;
+	void *arch_info;
+	bool powered_on;
+	dma_addr_t iova_start;
+	dma_addr_t iova_stop;
+	enum mhi_suspend_mode suspend_mode;
+
+	unsigned int lpm_disable_depth;
+	/* lock to toggle low power modes */
+	spinlock_t lpm_lock;
+};
+
+void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
+int mhi_pci_probe(struct pci_dev *pci_dev,
+		  const struct pci_device_id *device_id);
+
+#ifdef CONFIG_ARCH_QCOM
+
+int mhi_arch_power_up(struct mhi_controller *mhi_cntrl);
+int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl);
+void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl);
+int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl);
+void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl);
+int mhi_arch_link_suspend(struct mhi_controller *mhi_cntrl);
+int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl);
+
+#else
+
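+/*
+ * non-QCOM builds: no MSM PCIe or bus scale plumbing, so fall back to
+ * plain DMA setup and no-op link management
+ */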
+static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+	mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
+
+	return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
+}
+
+static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
+{
+	return 0;
+}
+
+static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static inline int mhi_arch_link_suspend(struct mhi_controller *mhi_cntrl)
+{
+	return 0;
+}
+
+static inline int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
+{
+	return 0;
+}
+
+static inline int mhi_arch_power_up(struct mhi_controller *mhi_cntrl)
+{
+	return 0;
+}
+
+#endif
+
+#endif /* _MHI_QCOM_ */
diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/core/Makefile
new file mode 100644
index 0000000..a743fbf
--- /dev/null
+++ b/drivers/bus/mhi/core/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MHI_BUS) += mhi_init.o mhi_main.o mhi_pm.o mhi_boot.o mhi_dtr.o
diff --git a/drivers/bus/mhi/core/mhi_boot.c b/drivers/bus/mhi/core/mhi_boot.c
new file mode 100644
index 0000000..58bbcfc
--- /dev/null
+++ b/drivers/bus/mhi/core/mhi_boot.c
@@ -0,0 +1,556 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/mhi.h>
+#include "mhi_internal.h"
+
+
+/* setup rddm vector table for rddm transfer and program rxvec */
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+			     struct image_info *img_info)
+{
+	struct mhi_buf *mhi_buf = img_info->mhi_buf;
+	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+	void __iomem *base = mhi_cntrl->bhie;
+	u32 sequence_id;
+	int i = 0;
+
+	for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
+		MHI_VERB("Setting vector:%pad size:%zu\n",
+			 &mhi_buf->dma_addr, mhi_buf->len);
+		bhi_vec->dma_addr = mhi_buf->dma_addr;
+		bhi_vec->size = mhi_buf->len;
+	}
+
+	MHI_LOG("BHIe programming for RDDM\n");
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+		      upper_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+		      lower_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+	sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+
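+	/* zero is not a valid sequence number, fall back to 1 */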
+	if (unlikely(!sequence_id))
+		sequence_id = 1;
+
+	mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+			    BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
+			    sequence_id);
+
+	MHI_LOG("address:%pad len:0x%zx sequence:%u\n",
+		&mhi_buf->dma_addr, mhi_buf->len, sequence_id);
+}
+
+/* collect rddm during kernel panic */
+static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+	u32 rx_status;
+	enum mhi_ee ee;
+	const u32 delayus = 2000;
+	u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+	const u32 rddm_timeout_us = 200000;
+	int rddm_retry = rddm_timeout_us / delayus; /* time to enter rddm */
+	void __iomem *base = mhi_cntrl->bhie;
+
+	MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	/*
+	 * This should only be executing during a kernel panic, we expect all
+	 * other cores to shutdown while we're collecting rddm buffer. After
+	 * returning from this function, we expect device to reset.
+	 *
+	 * Normally, we would read/write pm_state only after grabbing
+	 * pm_lock; since we're in a panic, we skip it. There is also no
+	 * guarantee this state change will take effect, since we're
+	 * setting it without holding pm_lock; it's best effort.
+	 */
+	mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+	/* update should take effect immediately */
+	smp_wmb();
+
+	/*
+	 * Make sure device is not already in RDDM.
+	 * In case device asserts and a kernel panic follows, device will
+	 * already be in RDDM. Do not trigger SYS ERR again and proceed with
+	 * waiting for image download completion.
+	 */
+	ee = mhi_get_exec_env(mhi_cntrl);
+	if (ee != MHI_EE_RDDM) {
+
+		MHI_LOG("Trigger device into RDDM mode using SYSERR\n");
+		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+		MHI_LOG("Waiting for device to enter RDDM\n");
+		while (rddm_retry--) {
+			ee = mhi_get_exec_env(mhi_cntrl);
+			if (ee == MHI_EE_RDDM)
+				break;
+
+			udelay(delayus);
+		}
+
+		if (rddm_retry < 0) {
+			/* Hardware reset; force device to enter rddm */
+			MHI_LOG(
+				"Did not enter RDDM, do a host req. reset\n");
+			mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+				      MHI_SOC_RESET_REQ_OFFSET,
+				      MHI_SOC_RESET_REQ);
+			udelay(delayus);
+		}
+
+		ee = mhi_get_exec_env(mhi_cntrl);
+	}
+
+	MHI_LOG("Waiting for image download completion, current EE:%s\n",
+		TO_MHI_EXEC_STR(ee));
+	while (retry--) {
+		ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
+					 BHIE_RXVECSTATUS_STATUS_BMSK,
+					 BHIE_RXVECSTATUS_STATUS_SHFT,
+					 &rx_status);
+		if (ret)
+			return -EIO;
+
+		if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
+			MHI_LOG("RDDM successfully collected\n");
+			return 0;
+		}
+
+		udelay(delayus);
+	}
+
+	ee = mhi_get_exec_env(mhi_cntrl);
+	ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
+
+	MHI_ERR("Did not complete RDDM transfer\n");
+	MHI_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee));
+	MHI_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret);
+
+	return -EIO;
+}
+
+/* download ramdump image from device */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
+{
+	void __iomem *base = mhi_cntrl->bhie;
+	u32 rx_status = 0;
+
+	if (in_panic)
+		return __mhi_download_rddm_in_panic(mhi_cntrl);
+
+	MHI_LOG("Waiting for image download completion\n");
+
+	/* waiting for image download completion */
+	wait_event_timeout(mhi_cntrl->state_event,
+			   mhi_read_reg_field(mhi_cntrl, base,
+					      BHIE_RXVECSTATUS_OFFS,
+					      BHIE_RXVECSTATUS_STATUS_BMSK,
+					      BHIE_RXVECSTATUS_STATUS_SHFT,
+					      &rx_status) || rx_status,
+			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+}
+EXPORT_SYMBOL(mhi_download_rddm_img);
+
+static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
+			    const struct mhi_buf *mhi_buf)
+{
+	void __iomem *base = mhi_cntrl->bhie;
+	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+	u32 tx_status = 0;
+
+	read_lock_bh(pm_lock);
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		read_unlock_bh(pm_lock);
+		return -EIO;
+	}
+
+	MHI_LOG("Starting BHIe Programming\n");
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
+		      upper_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
+		      lower_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
+
+	mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
+	mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
+			    BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
+			    mhi_cntrl->sequence_id);
+	read_unlock_bh(pm_lock);
+
+	MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n",
+		upper_32_bits(mhi_buf->dma_addr),
+		lower_32_bits(mhi_buf->dma_addr),
+		mhi_buf->len, mhi_cntrl->sequence_id);
+	MHI_LOG("Waiting for image transfer completion\n");
+
+	/* waiting for image download completion */
+	wait_event_timeout(mhi_cntrl->state_event,
+			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+			   mhi_read_reg_field(mhi_cntrl, base,
+					      BHIE_TXVECSTATUS_OFFS,
+					      BHIE_TXVECSTATUS_STATUS_BMSK,
+					      BHIE_TXVECSTATUS_STATUS_SHFT,
+					      &tx_status) || tx_status,
+			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+		return -EIO;
+
+	return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+}
+
+static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
+			   dma_addr_t dma_addr,
+			   size_t size)
+{
+	u32 tx_status = 0, val;
+	int i, ret;
+	void __iomem *base = mhi_cntrl->bhi;
+	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+	struct {
+		char *name;
+		u32 offset;
+	} error_reg[] = {
+		{ "ERROR_CODE", BHI_ERRCODE },
+		{ "ERROR_DBG1", BHI_ERRDBG1 },
+		{ "ERROR_DBG2", BHI_ERRDBG2 },
+		{ "ERROR_DBG3", BHI_ERRDBG3 },
+		{ NULL },
+	};
+
+	MHI_LOG("Starting BHI programming\n");
+
+	/* program and start SBL download via the BHI protocol */
+	read_lock_bh(pm_lock);
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		read_unlock_bh(pm_lock);
+		goto invalid_pm_state;
+	}
+
+	mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
+	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
+		      upper_32_bits(dma_addr));
+	mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
+		      lower_32_bits(dma_addr));
+	mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+	mhi_cntrl->session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
+	mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, mhi_cntrl->session_id);
+	read_unlock_bh(pm_lock);
+
+	MHI_LOG("Waiting for image transfer completion\n");
+
+	/* waiting for image download completion */
+	wait_event_timeout(mhi_cntrl->state_event,
+			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+			   mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
+					      BHI_STATUS_MASK, BHI_STATUS_SHIFT,
+					      &tx_status) || tx_status,
+			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+		goto invalid_pm_state;
+
+	if (tx_status == BHI_STATUS_ERROR) {
+		MHI_ERR("Image transfer failed\n");
+		read_lock_bh(pm_lock);
+		if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+			for (i = 0; error_reg[i].name; i++) {
+				ret = mhi_read_reg(mhi_cntrl, base,
+						   error_reg[i].offset, &val);
+				if (ret)
+					break;
+				MHI_ERR("reg:%s value:0x%x\n",
+					error_reg[i].name, val);
+			}
+		}
+		read_unlock_bh(pm_lock);
+		goto invalid_pm_state;
+	}
+
+	return (tx_status == BHI_STATUS_SUCCESS) ? 0 : -ETIMEDOUT;
+
+invalid_pm_state:
+
+	return -EIO;
+}
+
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+			 struct image_info *image_info)
+{
+	int i;
+	struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+	for (i = 0; i < image_info->entries; i++, mhi_buf++)
+		mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+				  mhi_buf->dma_addr);
+
+	kfree(image_info->mhi_buf);
+	kfree(image_info);
+}
+
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+			 struct image_info **image_info,
+			 size_t alloc_size)
+{
+	size_t seg_size = mhi_cntrl->seg_len;
+	/* require an additional entry for the vector table */
+	int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
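+	/*
+	 * e.g. a 3 MB image with a 1 MB seg_len needs DIV_ROUND_UP(3, 1) = 3
+	 * data segments plus the vector-table entry, so segments = 4
+	 */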
+	int i;
+	struct image_info *img_info;
+	struct mhi_buf *mhi_buf;
+
+	MHI_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n",
+		alloc_size, seg_size, segments);
+
+	img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+	if (!img_info)
+		return -ENOMEM;
+
+	/* allocate memory for entries */
+	img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
+				    GFP_KERNEL);
+	if (!img_info->mhi_buf)
+		goto error_alloc_mhi_buf;
+
+	/* allocate and populate vector table */
+	mhi_buf = img_info->mhi_buf;
+	for (i = 0; i < segments; i++, mhi_buf++) {
+		size_t vec_size = seg_size;
+
+		/* last entry is for vector table */
+		if (i == segments - 1)
+			vec_size = sizeof(struct bhi_vec_entry) * i;
+
+		mhi_buf->len = vec_size;
+		mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
+					&mhi_buf->dma_addr, GFP_KERNEL);
+		if (!mhi_buf->buf)
+			goto error_alloc_segment;
+
+		MHI_LOG("Entry:%d Address:0x%llx size:%zu\n", i,
+			(u64)mhi_buf->dma_addr, mhi_buf->len);
+	}
+
+	img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
+	img_info->entries = segments;
+	*image_info = img_info;
+
+	MHI_LOG("Successfully allocated bhi vec table\n");
+
+	return 0;
+
+error_alloc_segment:
+	for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
+		mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
+				  mhi_buf->dma_addr);
+
+error_alloc_mhi_buf:
+	kfree(img_info);
+
+	return -ENOMEM;
+}
+
+static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
+			      const struct firmware *firmware,
+			      struct image_info *img_info)
+{
+	size_t remainder = firmware->size;
+	size_t to_cpy;
+	const u8 *buf = firmware->data;
+	int i = 0;
+	struct mhi_buf *mhi_buf = img_info->mhi_buf;
+	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+
+	while (remainder) {
+		MHI_ASSERT(i >= img_info->entries, "malformed vector table");
+
+		to_cpy = min(remainder, mhi_buf->len);
+		memcpy(mhi_buf->buf, buf, to_cpy);
+		bhi_vec->dma_addr = mhi_buf->dma_addr;
+		bhi_vec->size = to_cpy;
+
+		MHI_VERB("Setting Vector:0x%llx size: %llu\n",
+			 bhi_vec->dma_addr, bhi_vec->size);
+		buf += to_cpy;
+		remainder -= to_cpy;
+		i++;
+		bhi_vec++;
+		mhi_buf++;
+	}
+}
+
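+/*
+ * Firmware download flow: wait for the device to reach PBL, push the SBL
+ * image over BHI, then (for full boot chain downloads) push the full AMSS
+ * image over BHIe once the device reports the SBL execution environment.
+ */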
+void mhi_fw_load_worker(struct work_struct *work)
+{
+	int ret;
+	struct mhi_controller *mhi_cntrl;
+	const char *fw_name;
+	const struct firmware *firmware;
+	struct image_info *image_info;
+	void *buf;
+	dma_addr_t dma_addr;
+	size_t size;
+
+	mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
+
+	MHI_LOG("Waiting for device to enter PBL from EE:%s\n",
+		TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 MHI_IN_PBL(mhi_cntrl->ee) ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI is not in valid state\n");
+		return;
+	}
+
+	MHI_LOG("Device current EE:%s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	/* if device in pthru, do reset to ready state transition */
+	if (mhi_cntrl->ee == MHI_EE_PTHRU)
+		goto fw_load_ee_pthru;
+
+	fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
+		mhi_cntrl->edl_image : mhi_cntrl->fw_image;
+
+	if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
+						     !mhi_cntrl->seg_len))) {
+		MHI_ERR("No firmware image defined or !sbl_size || !seg_len\n");
+		return;
+	}
+
+	ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev);
+	if (ret) {
+		MHI_ERR("Error loading firmware, ret:%d\n", ret);
+		return;
+	}
+
+	size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
+
+	/* the provided sbl_size is a maximum, not necessarily the image size */
+	if (size > firmware->size)
+		size = firmware->size;
+
+	buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
+	if (!buf) {
+		MHI_ERR("Could not allocate memory for image\n");
+		release_firmware(firmware);
+		return;
+	}
+
+	/* load sbl image */
+	memcpy(buf, firmware->data, size);
+	ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
+	mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
+
+	if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL)
+		release_firmware(firmware);
+
+	/* error or in edl, we're done */
+	if (ret || mhi_cntrl->ee == MHI_EE_EDL)
+		return;
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	mhi_cntrl->dev_state = MHI_STATE_RESET;
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/*
+	 * if we're doing fbc, populate the vector tables while the
+	 * device transitions into MHI READY state
+	 */
+	if (mhi_cntrl->fbc_download) {
+		ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
+					   firmware->size);
+		if (ret) {
+			MHI_ERR("Error alloc size of %zu\n", firmware->size);
+			goto error_alloc_fw_table;
+		}
+
+		MHI_LOG("Copying firmware image into vector table\n");
+
+		/* load the firmware into BHIE vec table */
+		mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
+	}
+
+fw_load_ee_pthru:
+	/* transitioning into MHI RESET->READY state */
+	ret = mhi_ready_state_transition(mhi_cntrl);
+
+	MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
+
+	if (!mhi_cntrl->fbc_download)
+		return;
+
+	if (ret) {
+		MHI_ERR("Did not transition to READY state\n");
+		goto error_read;
+	}
+
+	/* wait for SBL event */
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->ee == MHI_EE_SBL ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI did not enter BHIE\n");
+		goto error_read;
+	}
+
+	/* start full firmware image download */
+	image_info = mhi_cntrl->fbc_image;
+	ret = mhi_fw_load_amss(mhi_cntrl,
+			       /* last entry is vec table */
+			       &image_info->mhi_buf[image_info->entries - 1]);
+
+	MHI_LOG("amss fw_load, ret:%d\n", ret);
+
+	release_firmware(firmware);
+
+	return;
+
+error_read:
+	mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+	mhi_cntrl->fbc_image = NULL;
+
+error_alloc_fw_table:
+	release_firmware(firmware);
+}
diff --git a/drivers/bus/mhi/core/mhi_dtr.c b/drivers/bus/mhi/core/mhi_dtr.c
new file mode 100644
index 0000000..85e27b7
--- /dev/null
+++ b/drivers/bus/mhi/core/mhi_dtr.c
@@ -0,0 +1,236 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/wait.h>
+#include <linux/mhi.h>
+#include "mhi_internal.h"
+
+struct __packed dtr_ctrl_msg {
+	u32 preamble;
+	u32 msg_id;
+	u32 dest_id;
+	u32 size;
+	u32 msg;
+};
+
+#define CTRL_MAGIC (0x4C525443)
+#define CTRL_MSG_DTR BIT(0)
+#define CTRL_MSG_RTS BIT(1)
+#define CTRL_MSG_DCD BIT(0)
+#define CTRL_MSG_DSR BIT(1)
+#define CTRL_MSG_RI BIT(3)
+#define CTRL_HOST_STATE (0x10)
+#define CTRL_DEVICE_STATE (0x11)
+#define CTRL_GET_CHID(dtr) (dtr->dest_id & 0xFF)
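+
+/*
+ * Example host state message asserting DTR on an uplink channel (channel
+ * number is illustrative):
+ *
+ *	struct dtr_ctrl_msg msg = {
+ *		.preamble = CTRL_MAGIC,
+ *		.msg_id = CTRL_HOST_STATE,
+ *		.dest_id = 18,
+ *		.size = sizeof(u32),
+ *		.msg = CTRL_MSG_DTR,
+ *	};
+ */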
+
+static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl,
+			    struct mhi_device *mhi_dev,
+			    u32 tiocm)
+{
+	struct dtr_ctrl_msg *dtr_msg = NULL;
+	struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
+	spinlock_t *res_lock = &mhi_dev->dev.devres_lock;
+	u32 cur_tiocm;
+	int ret = 0;
+
+	cur_tiocm = mhi_dev->tiocm & ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
+
+	tiocm &= (TIOCM_DTR | TIOCM_RTS);
+
+	/* state did not change */
+	if (cur_tiocm == tiocm)
+		return 0;
+
+	mutex_lock(&dtr_chan->mutex);
+
+	dtr_msg = kzalloc(sizeof(*dtr_msg), GFP_KERNEL);
+	if (!dtr_msg) {
+		ret = -ENOMEM;
+		goto tiocm_exit;
+	}
+
+	dtr_msg->preamble = CTRL_MAGIC;
+	dtr_msg->msg_id = CTRL_HOST_STATE;
+	dtr_msg->dest_id = mhi_dev->ul_chan_id;
+	dtr_msg->size = sizeof(u32);
+	if (tiocm & TIOCM_DTR)
+		dtr_msg->msg |= CTRL_MSG_DTR;
+	if (tiocm & TIOCM_RTS)
+		dtr_msg->msg |= CTRL_MSG_RTS;
+
+	reinit_completion(&dtr_chan->completion);
+	ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg,
+				 sizeof(*dtr_msg), MHI_EOT);
+	if (ret)
+		goto tiocm_exit;
+
+	ret = wait_for_completion_timeout(&dtr_chan->completion,
+				msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	if (!ret) {
+		MHI_ERR("Failed to receive transfer callback\n");
+		ret = -EIO;
+		goto tiocm_exit;
+	}
+
+	ret = 0;
+	spin_lock_irq(res_lock);
+	mhi_dev->tiocm &= ~(TIOCM_DTR | TIOCM_RTS);
+	mhi_dev->tiocm |= tiocm;
+	spin_unlock_irq(res_lock);
+
+tiocm_exit:
+	kfree(dtr_msg);
+	mutex_unlock(&dtr_chan->mutex);
+
+	return ret;
+}
+
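+/*
+ * Service termios modem-control ioctls on behalf of MHI client drivers
+ * (e.g. a char device driver exposing an MHI channel to userspace).
+ */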
+long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	int ret;
+
+	/* ioctl not supported by this controller */
+	if (!mhi_cntrl->dtr_dev)
+		return -EIO;
+
+	switch (cmd) {
+	case TIOCMGET:
+		return mhi_dev->tiocm;
+	case TIOCMSET:
+	{
+		u32 tiocm;
+
+		ret = get_user(tiocm, (u32 __user *)arg);
+		if (ret)
+			return ret;
+
+		return mhi_dtr_tiocmset(mhi_cntrl, mhi_dev, tiocm);
+	}
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(mhi_ioctl);
+
+static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev,
+			       struct mhi_result *mhi_result)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct dtr_ctrl_msg *dtr_msg = mhi_result->buf_addr;
+	u32 chan;
+	spinlock_t *res_lock;
+
+	if (mhi_result->bytes_xferd != sizeof(*dtr_msg)) {
+		MHI_ERR("Unexpected length %zu received\n",
+			mhi_result->bytes_xferd);
+		return;
+	}
+
+	MHI_VERB("preamble:0x%x msg_id:%u dest_id:%u msg:0x%x\n",
+		 dtr_msg->preamble, dtr_msg->msg_id, dtr_msg->dest_id,
+		 dtr_msg->msg);
+
+	chan = CTRL_GET_CHID(dtr_msg);
+	if (chan >= mhi_cntrl->max_chan)
+		return;
+
+	mhi_dev = mhi_cntrl->mhi_chan[chan].mhi_dev;
+	if (!mhi_dev)
+		return;
+
+	res_lock = &mhi_dev->dev.devres_lock;
+	spin_lock_irq(res_lock);
+	mhi_dev->tiocm &= ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
+
+	if (dtr_msg->msg & CTRL_MSG_DCD)
+		mhi_dev->tiocm |= TIOCM_CD;
+
+	if (dtr_msg->msg & CTRL_MSG_DSR)
+		mhi_dev->tiocm |= TIOCM_DSR;
+
+	if (dtr_msg->msg & CTRL_MSG_RI)
+		mhi_dev->tiocm |= TIOCM_RI;
+	spin_unlock_irq(res_lock);
+
+	/* Notify the update */
+	mhi_notify(mhi_dev, MHI_CB_DTR_SIGNAL);
+}
+
+static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev,
+			       struct mhi_result *mhi_result)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
+
+	MHI_VERB("Received with status:%d\n", mhi_result->transaction_status);
+	if (!mhi_result->transaction_status)
+		complete(&dtr_chan->completion);
+}
+
+static void mhi_dtr_remove(struct mhi_device *mhi_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	mhi_cntrl->dtr_dev = NULL;
+}
+
+static int mhi_dtr_probe(struct mhi_device *mhi_dev,
+			 const struct mhi_device_id *id)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	int ret;
+
+	MHI_LOG("Enter for DTR control channel\n");
+
+	ret = mhi_prepare_for_transfer(mhi_dev);
+	if (!ret)
+		mhi_cntrl->dtr_dev = mhi_dev;
+
+	MHI_LOG("Exit with ret:%d\n", ret);
+
+	return ret;
+}
+
+static const struct mhi_device_id mhi_dtr_table[] = {
+	{ .chan = "IP_CTRL" },
+	{},
+};
+
+static struct mhi_driver mhi_dtr_driver = {
+	.id_table = mhi_dtr_table,
+	.remove = mhi_dtr_remove,
+	.probe = mhi_dtr_probe,
+	.ul_xfer_cb = mhi_dtr_ul_xfer_cb,
+	.dl_xfer_cb = mhi_dtr_dl_xfer_cb,
+	.driver = {
+		.name = "MHI_DTR",
+		.owner = THIS_MODULE,
+	}
+};
+
+int __init mhi_dtr_init(void)
+{
+	return mhi_driver_register(&mhi_dtr_driver);
+}
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
new file mode 100644
index 0000000..53a9e5c
--- /dev/null
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -0,0 +1,1798 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/mhi.h>
+#include "mhi_internal.h"
+
+static struct dentry *mhi_parent;
+
+const char * const mhi_ee_str[MHI_EE_MAX] = {
+	[MHI_EE_PBL] = "PBL",
+	[MHI_EE_SBL] = "SBL",
+	[MHI_EE_AMSS] = "AMSS",
+	[MHI_EE_RDDM] = "RDDM",
+	[MHI_EE_WFW] = "WFW",
+	[MHI_EE_PTHRU] = "PASS THRU",
+	[MHI_EE_EDL] = "EDL",
+	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
+	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
+};
+
+const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX] = {
+	[MHI_ST_TRANSITION_PBL] = "PBL",
+	[MHI_ST_TRANSITION_READY] = "READY",
+	[MHI_ST_TRANSITION_SBL] = "SBL",
+	[MHI_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
+};
+
+const char * const mhi_state_str[MHI_STATE_MAX] = {
+	[MHI_STATE_RESET] = "RESET",
+	[MHI_STATE_READY] = "READY",
+	[MHI_STATE_M0] = "M0",
+	[MHI_STATE_M1] = "M1",
+	[MHI_STATE_M2] = "M2",
+	[MHI_STATE_M3] = "M3",
+	[MHI_STATE_M3_FAST] = "M3_FAST",
+	[MHI_STATE_BHI] = "BHI",
+	[MHI_STATE_SYS_ERR] = "SYS_ERR",
+};
+
+static const char * const mhi_pm_state_str[] = {
+	[MHI_PM_BIT_DISABLE] = "DISABLE",
+	[MHI_PM_BIT_POR] = "POR",
+	[MHI_PM_BIT_M0] = "M0",
+	[MHI_PM_BIT_M2] = "M2",
+	[MHI_PM_BIT_M3_ENTER] = "M?->M3",
+	[MHI_PM_BIT_M3] = "M3",
+	[MHI_PM_BIT_M3_EXIT] = "M3->M0",
+	[MHI_PM_BIT_FW_DL_ERR] = "FW DL Error",
+	[MHI_PM_BIT_SYS_ERR_DETECT] = "SYS_ERR Detect",
+	[MHI_PM_BIT_SYS_ERR_PROCESS] = "SYS_ERR Process",
+	[MHI_PM_BIT_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
+	[MHI_PM_BIT_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
+};
+
+struct mhi_bus mhi_bus;
+
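+/*
+ * each MHI_PM_* state is a distinct bit in the pm_state bitmask;
+ * find_last_bit() maps the highest set bit to its index in the name
+ * table above (e.g. MHI_PM_M0 maps to "M0")
+ */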
+const char *to_mhi_pm_state_str(enum MHI_PM_STATE state)
+{
+	int index = find_last_bit((unsigned long *)&state, 32);
+
+	if (index >= ARRAY_SIZE(mhi_pm_state_str))
+		return "Invalid State";
+
+	return mhi_pm_state_str[index];
+}
+
+static ssize_t bus_vote_show(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			atomic_read(&mhi_dev->bus_vote));
+}
+
+static ssize_t bus_vote_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf,
+			      size_t count)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	int ret = -EINVAL;
+
+	if (sysfs_streq(buf, "get")) {
+		ret = mhi_device_get_sync(mhi_dev, MHI_VOTE_BUS);
+	} else if (sysfs_streq(buf, "put")) {
+		mhi_device_put(mhi_dev, MHI_VOTE_BUS);
+		ret = 0;
+	}
+
+	return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(bus_vote);
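+
+/*
+ * Userspace usage (sysfs path is illustrative):
+ *	echo get > /sys/bus/mhi/devices/<device>/bus_vote
+ *	echo put > /sys/bus/mhi/devices/<device>/bus_vote
+ */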
+
+static ssize_t device_vote_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			atomic_read(&mhi_dev->dev_vote));
+}
+
+static ssize_t device_vote_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t count)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	int ret = -EINVAL;
+
+	if (sysfs_streq(buf, "get")) {
+		ret = mhi_device_get_sync(mhi_dev, MHI_VOTE_DEVICE);
+	} else if (sysfs_streq(buf, "put")) {
+		mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+		ret = 0;
+	}
+
+	return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(device_vote);
+
+static struct attribute *mhi_vote_attrs[] = {
+	&dev_attr_bus_vote.attr,
+	&dev_attr_device_vote.attr,
+	NULL,
+};
+
+static const struct attribute_group mhi_vote_group = {
+	.attrs = mhi_vote_attrs,
+};
+
+int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl)
+{
+	return sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj,
+				  &mhi_vote_group);
+}
+
+void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+	sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_vote_group);
+
+	/* relinquish any pending votes for device */
+	while (atomic_read(&mhi_dev->dev_vote))
+		mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+
+	/* remove pending votes for the bus */
+	while (atomic_read(&mhi_dev->bus_vote))
+		mhi_device_put(mhi_dev, MHI_VOTE_BUS);
+}
+
+/* the MHI protocol requires transfer rings to be aligned to the ring length */
+static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
+				  struct mhi_ring *ring,
+				  u64 len)
+{
+	ring->alloc_size = len + (len - 1);
+	ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
+					       &ring->dma_handle, GFP_KERNEL);
+	if (!ring->pre_aligned)
+		return -ENOMEM;
+
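+	/*
+	 * over-allocating len - 1 extra bytes guarantees an address aligned
+	 * to len exists inside the buffer; round dma_handle up to it and
+	 * offset the cpu base by the same amount
+	 */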
+	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
+	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
+	return 0;
+}
+
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
+{
+	int i;
+	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		if (!mhi_event->request_irq)
+			continue;
+
+		free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
+	}
+
+	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+}
+
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
+{
+	int i;
+	int ret;
+	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+
+	/* for BHI INTVEC msi */
+	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handlr,
+				   mhi_intvec_threaded_handlr,
+				   IRQF_ONESHOT | IRQF_NO_SUSPEND,
+				   "mhi", mhi_cntrl);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		if (!mhi_event->request_irq)
+			continue;
+
+		ret = request_irq(mhi_cntrl->irq[mhi_event->msi],
+				  mhi_msi_handlr, IRQF_SHARED | IRQF_NO_SUSPEND,
+				  "mhi", mhi_event);
+		if (ret) {
+			MHI_ERR("Error requesting irq:%d for ev:%d\n",
+				mhi_cntrl->irq[mhi_event->msi], i);
+			goto error_request;
+		}
+	}
+
+	return 0;
+
+error_request:
+	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+		if (!mhi_event->request_irq)
+			continue;
+
+		free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
+	}
+	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
+
+	return ret;
+}
+
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+	int i;
+	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
+	struct mhi_cmd *mhi_cmd;
+	struct mhi_event *mhi_event;
+	struct mhi_ring *ring;
+
+	mhi_cmd = mhi_cntrl->mhi_cmd;
+	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
+		ring = &mhi_cmd->ring;
+		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+				  ring->pre_aligned, ring->dma_handle);
+		ring->base = NULL;
+		ring->iommu_base = 0;
+	}
+
+	mhi_free_coherent(mhi_cntrl,
+			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		if (mhi_event->offload_ev)
+			continue;
+
+		ring = &mhi_event->ring;
+		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+				  ring->pre_aligned, ring->dma_handle);
+		ring->base = NULL;
+		ring->iommu_base = 0;
+	}
+
+	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
+			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+			  mhi_ctxt->er_ctxt_addr);
+
+	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
+			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+			  mhi_ctxt->chan_ctxt_addr);
+
+	kfree(mhi_ctxt);
+	mhi_cntrl->mhi_ctxt = NULL;
+}
+
+static int mhi_init_debugfs_mhi_states_open(struct inode *inode,
+					    struct file *fp)
+{
+	return single_open(fp, mhi_debugfs_mhi_states_show, inode->i_private);
+}
+
+static int mhi_init_debugfs_mhi_event_open(struct inode *inode, struct file *fp)
+{
+	return single_open(fp, mhi_debugfs_mhi_event_show, inode->i_private);
+}
+
+static int mhi_init_debugfs_mhi_chan_open(struct inode *inode, struct file *fp)
+{
+	return single_open(fp, mhi_debugfs_mhi_chan_show, inode->i_private);
+}
+
+static const struct file_operations debugfs_state_ops = {
+	.open = mhi_init_debugfs_mhi_states_open,
+	.release = single_release,
+	.read = seq_read,
+};
+
+static const struct file_operations debugfs_ev_ops = {
+	.open = mhi_init_debugfs_mhi_event_open,
+	.release = single_release,
+	.read = seq_read,
+};
+
+static const struct file_operations debugfs_chan_ops = {
+	.open = mhi_init_debugfs_mhi_chan_open,
+	.release = single_release,
+	.read = seq_read,
+};
+
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_reset_fops, NULL,
+			mhi_debugfs_trigger_reset, "%llu\n");
+
+void mhi_init_debugfs(struct mhi_controller *mhi_cntrl)
+{
+	struct dentry *dentry;
+	char node[32];
+
+	if (!mhi_cntrl->parent)
+		return;
+
+	snprintf(node, sizeof(node), "%04x_%02u:%02u.%02u",
+		 mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
+		 mhi_cntrl->slot);
+
+	dentry = debugfs_create_dir(node, mhi_cntrl->parent);
+	if (IS_ERR_OR_NULL(dentry))
+		return;
+
+	debugfs_create_file("states", 0444, dentry, mhi_cntrl,
+			    &debugfs_state_ops);
+	debugfs_create_file("events", 0444, dentry, mhi_cntrl,
+			    &debugfs_ev_ops);
+	debugfs_create_file("chan", 0444, dentry, mhi_cntrl, &debugfs_chan_ops);
+	debugfs_create_file("reset", 0444, dentry, mhi_cntrl,
+			    &debugfs_trigger_reset_fops);
+	mhi_cntrl->dentry = dentry;
+}
+
+void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl)
+{
+	debugfs_remove_recursive(mhi_cntrl->dentry);
+	mhi_cntrl->dentry = NULL;
+}
+
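+/*
+ * Allocate and initialize the channel, event, and command context arrays
+ * that are shared with the device through the coherent DMA region.
+ */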
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_ctxt *mhi_ctxt;
+	struct mhi_chan_ctxt *chan_ctxt;
+	struct mhi_event_ctxt *er_ctxt;
+	struct mhi_cmd_ctxt *cmd_ctxt;
+	struct mhi_chan *mhi_chan;
+	struct mhi_event *mhi_event;
+	struct mhi_cmd *mhi_cmd;
+	int ret = -ENOMEM, i;
+
+	atomic_set(&mhi_cntrl->dev_wake, 0);
+	atomic_set(&mhi_cntrl->alloc_size, 0);
+	atomic_set(&mhi_cntrl->pending_pkts, 0);
+
+	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
+	if (!mhi_ctxt)
+		return -ENOMEM;
+
+	/* setup channel ctxt */
+	mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
+			sizeof(*mhi_ctxt->chan_ctxt) * mhi_cntrl->max_chan,
+			&mhi_ctxt->chan_ctxt_addr, GFP_KERNEL);
+	if (!mhi_ctxt->chan_ctxt)
+		goto error_alloc_chan_ctxt;
+
+	mhi_chan = mhi_cntrl->mhi_chan;
+	chan_ctxt = mhi_ctxt->chan_ctxt;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+		/* If it's offload channel skip this step */
+		if (mhi_chan->offload_ch)
+			continue;
+
+		chan_ctxt->chstate = MHI_CH_STATE_DISABLED;
+		chan_ctxt->brstmode = mhi_chan->db_cfg.brstmode;
+		chan_ctxt->pollcfg = mhi_chan->db_cfg.pollcfg;
+		chan_ctxt->chtype = mhi_chan->type;
+		chan_ctxt->erindex = mhi_chan->er_index;
+
+		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+		mhi_chan->tre_ring.db_addr = &chan_ctxt->wp;
+	}
+
+	/* setup event context */
+	mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
+			sizeof(*mhi_ctxt->er_ctxt) * mhi_cntrl->total_ev_rings,
+			&mhi_ctxt->er_ctxt_addr, GFP_KERNEL);
+	if (!mhi_ctxt->er_ctxt)
+		goto error_alloc_er_ctxt;
+
+	er_ctxt = mhi_ctxt->er_ctxt;
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+		     mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		/* it's a satellite ev, we do not touch it */
+		if (mhi_event->offload_ev)
+			continue;
+
+		er_ctxt->intmodc = 0;
+		er_ctxt->intmodt = mhi_event->intmod;
+		er_ctxt->ertype = MHI_ER_TYPE_VALID;
+		er_ctxt->msivec = mhi_event->msi;
+		mhi_event->db_cfg.db_mode = true;
+
+		ring->el_size = sizeof(struct mhi_tre);
+		ring->len = ring->el_size * ring->elements;
+		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+		if (ret)
+			goto error_alloc_er;
+
+		ring->rp = ring->wp = ring->base;
+		er_ctxt->rbase = ring->iommu_base;
+		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
+		er_ctxt->rlen = ring->len;
+		ring->ctxt_wp = &er_ctxt->wp;
+	}
+
+	/* setup cmd context */
+	mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
+				sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+				&mhi_ctxt->cmd_ctxt_addr, GFP_KERNEL);
+	if (!mhi_ctxt->cmd_ctxt)
+		goto error_alloc_er;
+
+	mhi_cmd = mhi_cntrl->mhi_cmd;
+	cmd_ctxt = mhi_ctxt->cmd_ctxt;
+	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+		struct mhi_ring *ring = &mhi_cmd->ring;
+
+		ring->el_size = sizeof(struct mhi_tre);
+		ring->elements = CMD_EL_PER_RING;
+		ring->len = ring->el_size * ring->elements;
+		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
+		if (ret)
+			goto error_alloc_cmd;
+
+		ring->rp = ring->wp = ring->base;
+		cmd_ctxt->rbase = ring->iommu_base;
+		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
+		cmd_ctxt->rlen = ring->len;
+		ring->ctxt_wp = &cmd_ctxt->wp;
+	}
+
+	mhi_cntrl->mhi_ctxt = mhi_ctxt;
+
+	return 0;
+
+error_alloc_cmd:
+	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
+		struct mhi_ring *ring = &mhi_cmd->ring;
+
+		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+				  ring->pre_aligned, ring->dma_handle);
+	}
+	mhi_free_coherent(mhi_cntrl,
+			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
+			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
+	i = mhi_cntrl->total_ev_rings;
+	mhi_event = mhi_cntrl->mhi_event + i;
+
+error_alloc_er:
+	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		if (mhi_event->offload_ev)
+			continue;
+
+		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
+				  ring->pre_aligned, ring->dma_handle);
+	}
+	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
+			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
+			  mhi_ctxt->er_ctxt_addr);
+
+error_alloc_er_ctxt:
+	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
+			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
+			  mhi_ctxt->chan_ctxt_addr);
+
+error_alloc_chan_ctxt:
+	kfree(mhi_ctxt);
+
+	return ret;
+}
+
+/* to be used only if a single event ring with the type is present */
+static int mhi_get_er_index(struct mhi_controller *mhi_cntrl,
+			    enum mhi_er_data_type type)
+{
+	int i;
+	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+
+	/* find event ring for requested type */
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		if (mhi_event->data_type == type)
+			return mhi_event->er_index;
+	}
+
+	return -ENOENT;
+}
+
+int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_timesync *mhi_tsync;
+	u32 time_offset, db_offset;
+	int ret;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		ret = -EIO;
+		goto exit_timesync;
+	}
+
+	ret = mhi_get_capability_offset(mhi_cntrl, TIMESYNC_CAP_ID,
+					&time_offset);
+	if (ret) {
+		MHI_LOG("No timesync capability found\n");
+		goto exit_timesync;
+	}
+
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	if (!mhi_cntrl->time_get || !mhi_cntrl->lpm_disable ||
+	     !mhi_cntrl->lpm_enable)
+		return -EINVAL;
+
+	/* the register-based time read method is supported */
+	mhi_tsync = kzalloc(sizeof(*mhi_tsync), GFP_KERNEL);
+	if (!mhi_tsync)
+		return -ENOMEM;
+
+	spin_lock_init(&mhi_tsync->lock);
+	mutex_init(&mhi_tsync->lpm_mutex);
+	INIT_LIST_HEAD(&mhi_tsync->head);
+	init_completion(&mhi_tsync->completion);
+
+	/* save time_offset for obtaining time */
+	MHI_LOG("TIME OFFS:0x%x\n", time_offset);
+	mhi_tsync->time_reg = mhi_cntrl->regs + time_offset
+			      + TIMESYNC_TIME_LOW_OFFSET;
+
+	mhi_cntrl->mhi_tsync = mhi_tsync;
+
+	ret = mhi_create_timesync_sysfs(mhi_cntrl);
+	if (unlikely(ret)) {
+		/* kernel method will still work */
+		MHI_ERR("Failed to create timesync sysfs nodes\n");
+	}
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		ret = -EIO;
+		goto exit_timesync;
+	}
+
+	/* get DB offset if supported, else return */
+	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
+			   time_offset + TIMESYNC_DB_OFFSET, &db_offset);
+	if (ret || !db_offset) {
+		ret = 0;
+		goto exit_timesync;
+	}
+
+	MHI_LOG("TIMESYNC_DB OFFS:0x%x\n", db_offset);
+	mhi_tsync->db = mhi_cntrl->regs + db_offset;
+
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	/* get time-sync event ring configuration */
+	ret = mhi_get_er_index(mhi_cntrl, MHI_ER_TSYNC_ELEMENT_TYPE);
+	if (ret < 0) {
+		MHI_LOG("Could not find timesync event ring\n");
+		return ret;
+	}
+
+	mhi_tsync->er_index = ret;
+
+	ret = mhi_send_cmd(mhi_cntrl, NULL, MHI_CMD_TIMSYNC_CFG);
+	if (ret) {
+		MHI_ERR("Failed to send time sync cfg cmd\n");
+		return ret;
+	}
+
+	ret = wait_for_completion_timeout(&mhi_tsync->completion,
+			msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || mhi_tsync->ccs != MHI_EV_CC_SUCCESS) {
+		MHI_ERR("Failed to get time cfg cmd completion\n");
+		return -EIO;
+	}
+
+	return 0;
+
+exit_timesync:
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return ret;
+}
+
+static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl)
+{
+	int ret, er_index;
+	u32 bw_cfg_offset;
+
+	/* controller doesn't support dynamic bw switch */
+	if (!mhi_cntrl->bw_scale)
+		return -ENODEV;
+
+	ret = mhi_get_capability_offset(mhi_cntrl, BW_SCALE_CAP_ID,
+					&bw_cfg_offset);
+	if (ret)
+		return ret;
+
+	/* check whether an ER is configured to support BW scale */
+	er_index = mhi_get_er_index(mhi_cntrl, MHI_ER_BW_SCALE_ELEMENT_TYPE);
+	if (er_index < 0)
+		return er_index;
+
+	bw_cfg_offset += BW_SCALE_CFG_OFFSET;
+
+	MHI_LOG("BW_CFG OFFSET:0x%x\n", bw_cfg_offset);
+
+	/* advertise host support */
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset,
+		      MHI_BW_SCALE_SETUP(er_index));
+
+	return 0;
+}
+
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+{
+	u32 val;
+	int i, ret;
+	struct mhi_chan *mhi_chan;
+	struct mhi_event *mhi_event;
+	void __iomem *base = mhi_cntrl->regs;
+	struct {
+		u32 offset;
+		u32 mask;
+		u32 shift;
+		u32 val;
+	} reg_info[] = {
+		{
+			CCABAP_HIGHER, U32_MAX, 0,
+			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+		},
+		{
+			CCABAP_LOWER, U32_MAX, 0,
+			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
+		},
+		{
+			ECABAP_HIGHER, U32_MAX, 0,
+			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+		},
+		{
+			ECABAP_LOWER, U32_MAX, 0,
+			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
+		},
+		{
+			CRCBAP_HIGHER, U32_MAX, 0,
+			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+		},
+		{
+			CRCBAP_LOWER, U32_MAX, 0,
+			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
+		},
+		{
+			MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
+			mhi_cntrl->total_ev_rings,
+		},
+		{
+			MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
+			mhi_cntrl->hw_ev_rings,
+		},
+		{
+			MHICTRLBASE_HIGHER, U32_MAX, 0,
+			upper_32_bits(mhi_cntrl->iova_start),
+		},
+		{
+			MHICTRLBASE_LOWER, U32_MAX, 0,
+			lower_32_bits(mhi_cntrl->iova_start),
+		},
+		{
+			MHIDATABASE_HIGHER, U32_MAX, 0,
+			upper_32_bits(mhi_cntrl->iova_start),
+		},
+		{
+			MHIDATABASE_LOWER, U32_MAX, 0,
+			lower_32_bits(mhi_cntrl->iova_start),
+		},
+		{
+			MHICTRLLIMIT_HIGHER, U32_MAX, 0,
+			upper_32_bits(mhi_cntrl->iova_stop),
+		},
+		{
+			MHICTRLLIMIT_LOWER, U32_MAX, 0,
+			lower_32_bits(mhi_cntrl->iova_stop),
+		},
+		{
+			MHIDATALIMIT_HIGHER, U32_MAX, 0,
+			upper_32_bits(mhi_cntrl->iova_stop),
+		},
+		{
+			MHIDATALIMIT_LOWER, U32_MAX, 0,
+			lower_32_bits(mhi_cntrl->iova_stop),
+		},
+		{ 0, 0, 0 }
+	};
+
+	MHI_LOG("Initializing MMIO\n");
+
+	/* set up DB register for all the chan rings */
+	ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
+				 CHDBOFF_CHDBOFF_SHIFT, &val);
+	if (ret)
+		return -EIO;
+
+	MHI_LOG("CHDBOFF:0x%x\n", val);
+
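+	/*
+	 * each doorbell is an 8-byte register; channel n's doorbell
+	 * lives at offset CHDBOFF + 8 * n
+	 */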
+	/* setup wake db */
+	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
+	mhi_cntrl->wake_set = false;
+
+	/* setup bw scale db */
+	mhi_cntrl->bw_scale_db = base + val + (8 * MHI_BW_SCALE_CHAN_DB);
+
+	/* setup channel db addresses */
+	mhi_chan = mhi_cntrl->mhi_chan;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
+		mhi_chan->tre_ring.db_addr = base + val;
+
+	/* setup event ring db addresses */
+	ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
+				 ERDBOFF_ERDBOFF_SHIFT, &val);
+	if (ret)
+		return -EIO;
+
+	MHI_LOG("ERDBOFF:0x%x\n", val);
+
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
+		if (mhi_event->offload_ev)
+			continue;
+
+		mhi_event->ring.db_addr = base + val;
+	}
+
+	/* set up DB register for primary CMD rings */
+	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;
+
+	MHI_LOG("Programming all MMIO values.\n");
+	for (i = 0; reg_info[i].offset; i++)
+		mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
+				    reg_info[i].mask, reg_info[i].shift,
+				    reg_info[i].val);
+
+	/* setup bandwidth scaling features */
+	mhi_init_bw_scale(mhi_cntrl);
+
+	return 0;
+}
+
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+			  struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring;
+	struct mhi_ring *tre_ring;
+	struct mhi_chan_ctxt *chan_ctxt;
+
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+
+	mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+			  tre_ring->pre_aligned, tre_ring->dma_handle);
+	vfree(buf_ring->base);
+
+	buf_ring->base = tre_ring->base = NULL;
+	chan_ctxt->rbase = 0;
+}
+
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+		       struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring;
+	struct mhi_ring *tre_ring;
+	struct mhi_chan_ctxt *chan_ctxt;
+	int ret;
+
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+	tre_ring->el_size = sizeof(struct mhi_tre);
+	tre_ring->len = tre_ring->el_size * tre_ring->elements;
+	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
+	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
+	if (ret)
+		return -ENOMEM;
+
+	buf_ring->el_size = sizeof(struct mhi_buf_info);
+	buf_ring->len = buf_ring->el_size * buf_ring->elements;
+	buf_ring->base = vzalloc(buf_ring->len);
+
+	if (!buf_ring->base) {
+		mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
+				  tre_ring->pre_aligned, tre_ring->dma_handle);
+		return -ENOMEM;
+	}
+
+	chan_ctxt->chstate = MHI_CH_STATE_ENABLED;
+	chan_ctxt->rbase = tre_ring->iommu_base;
+	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
+	chan_ctxt->rlen = tre_ring->len;
+	tre_ring->ctxt_wp = &chan_ctxt->wp;
+
+	tre_ring->rp = tre_ring->wp = tre_ring->base;
+	buf_ring->rp = buf_ring->wp = buf_ring->base;
+	mhi_chan->db_cfg.db_mode = 1;
+
+	/* update to all cores */
+	smp_wmb();
+
+	return 0;
+}
+
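+/*
+ * Allow a client managing an offload channel to push its locally built
+ * event ("ECA") and channel ("CCA") context entries into the shared
+ * context arrays.
+ */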
+int mhi_device_configure(struct mhi_device *mhi_dev,
+			 enum dma_data_direction dir,
+			 struct mhi_buf *cfg_tbl,
+			 int elements)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan;
+	struct mhi_event_ctxt *er_ctxt;
+	struct mhi_chan_ctxt *ch_ctxt;
+	int er_index, chan;
+
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		mhi_chan = mhi_dev->ul_chan;
+		break;
+	case DMA_BIDIRECTIONAL:
+	case DMA_FROM_DEVICE:
+	case DMA_NONE:
+		mhi_chan = mhi_dev->dl_chan;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	er_index = mhi_chan->er_index;
+	chan = mhi_chan->chan;
+
+	for (; elements > 0; elements--, cfg_tbl++) {
+		/* update event context array */
+		if (!strcmp(cfg_tbl->name, "ECA")) {
+			er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[er_index];
+			if (sizeof(*er_ctxt) != cfg_tbl->len) {
+				MHI_ERR(
+					"Invalid ECA size, expected:%zu actual%zu\n",
+					sizeof(*er_ctxt), cfg_tbl->len);
+				return -EINVAL;
+			}
+			memcpy((void *)er_ctxt, cfg_tbl->buf, sizeof(*er_ctxt));
+			continue;
+		}
+
+		/* update channel context array */
+		if (!strcmp(cfg_tbl->name, "CCA")) {
+			ch_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[chan];
+			if (cfg_tbl->len != sizeof(*ch_ctxt)) {
+				MHI_ERR(
+					"Invalid CCA size, expected:%zu actual:%zu\n",
+					sizeof(*ch_ctxt), cfg_tbl->len);
+				return -EINVAL;
+			}
+			memcpy((void *)ch_ctxt, cfg_tbl->buf, sizeof(*ch_ctxt));
+			continue;
+		}
+
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
+			   struct device_node *of_node)
+{
+	int i, ret, num = 0;
+	struct mhi_event *mhi_event;
+	struct device_node *child;
+
+	of_node = of_find_node_by_name(of_node, "mhi_events");
+	if (!of_node)
+		return -EINVAL;
+
+	for_each_available_child_of_node(of_node, child) {
+		if (!strcmp(child->name, "mhi_event"))
+			num++;
+	}
+
+	if (!num)
+		return -EINVAL;
+
+	mhi_cntrl->total_ev_rings = num;
+	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
+				       GFP_KERNEL);
+	if (!mhi_cntrl->mhi_event)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&mhi_cntrl->lp_ev_rings);
+
+	/* populate ev ring */
+	mhi_event = mhi_cntrl->mhi_event;
+	i = 0;
+	for_each_available_child_of_node(of_node, child) {
+		if (strcmp(child->name, "mhi_event"))
+			continue;
+
+		mhi_event->er_index = i++;
+		ret = of_property_read_u32(child, "mhi,num-elements",
+					   (u32 *)&mhi_event->ring.elements);
+		if (ret)
+			goto error_ev_cfg;
+
+		ret = of_property_read_u32(child, "mhi,intmod",
+					   &mhi_event->intmod);
+		if (ret)
+			goto error_ev_cfg;
+
+		ret = of_property_read_u32(child, "mhi,msi",
+					   &mhi_event->msi);
+		if (ret)
+			goto error_ev_cfg;
+
+		ret = of_property_read_u32(child, "mhi,chan",
+					   &mhi_event->chan);
+		if (!ret) {
+			if (mhi_event->chan >= mhi_cntrl->max_chan)
+				goto error_ev_cfg;
+			/* this event ring has a dedicated channel */
+			mhi_event->mhi_chan =
+				&mhi_cntrl->mhi_chan[mhi_event->chan];
+		}
+
+		ret = of_property_read_u32(child, "mhi,priority",
+					   &mhi_event->priority);
+		if (ret)
+			goto error_ev_cfg;
+
+		ret = of_property_read_u32(child, "mhi,brstmode",
+					   &mhi_event->db_cfg.brstmode);
+		if (ret || MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
+			goto error_ev_cfg;
+
+		mhi_event->db_cfg.process_db =
+			(mhi_event->db_cfg.brstmode == MHI_BRSTMODE_ENABLE) ?
+			mhi_db_brstmode : mhi_db_brstmode_disable;
+
+		ret = of_property_read_u32(child, "mhi,data-type",
+					   &mhi_event->data_type);
+		if (ret)
+			mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE;
+
+		if (mhi_event->data_type > MHI_ER_DATA_TYPE_MAX)
+			goto error_ev_cfg;
+
+		switch (mhi_event->data_type) {
+		case MHI_ER_DATA_ELEMENT_TYPE:
+			mhi_event->process_event = mhi_process_data_event_ring;
+			break;
+		case MHI_ER_CTRL_ELEMENT_TYPE:
+			mhi_event->process_event = mhi_process_ctrl_ev_ring;
+			break;
+		case MHI_ER_TSYNC_ELEMENT_TYPE:
+			mhi_event->process_event = mhi_process_tsync_event_ring;
+			break;
+		case MHI_ER_BW_SCALE_ELEMENT_TYPE:
+			mhi_event->process_event = mhi_process_bw_scale_ev_ring;
+			break;
+		}
+
+		mhi_event->hw_ring = of_property_read_bool(child, "mhi,hw-ev");
+		if (mhi_event->hw_ring)
+			mhi_cntrl->hw_ev_rings++;
+		else
+			mhi_cntrl->sw_ev_rings++;
+		mhi_event->cl_manage = of_property_read_bool(child,
+							"mhi,client-manage");
+		mhi_event->offload_ev = of_property_read_bool(child,
+							      "mhi,offload");
+
+		/*
+		 * low priority events are handled in a separate worker thread
+		 * to allow for sleeping functions to be called.
+		 */
+		if (!mhi_event->offload_ev) {
+			if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
+				list_add_tail(&mhi_event->node,
+						&mhi_cntrl->lp_ev_rings);
+			else
+				mhi_event->request_irq = true;
+		}
+
+		mhi_event++;
+	}
+
+	/* we need one MSI per event ring plus an additional one for BHI */
+	mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1;
+
+	return 0;
+
+error_ev_cfg:
+
+	kfree(mhi_cntrl->mhi_event);
+	return -EINVAL;
+}
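+
+/*
+ * Illustrative devicetree fragment consumed by of_parse_ev_cfg() above
+ * (property names from the parser; values are hypothetical):
+ *
+ *	mhi_events {
+ *		mhi_event {
+ *			mhi,num-elements = <128>;
+ *			mhi,intmod = <1>;
+ *			mhi,msi = <1>;
+ *			mhi,priority = <1>;
+ *			mhi,brstmode = <2>;
+ *		};
+ *	};
+ */
+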
+static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
+			   struct device_node *of_node)
+{
+	int ret;
+	struct device_node *child;
+	u32 chan;
+
+	ret = of_property_read_u32(of_node, "mhi,max-channels",
+				   &mhi_cntrl->max_chan);
+	if (ret)
+		return ret;
+
+	of_node = of_find_node_by_name(of_node, "mhi_channels");
+	if (!of_node)
+		return -EINVAL;
+
+	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
+				      sizeof(*mhi_cntrl->mhi_chan));
+	if (!mhi_cntrl->mhi_chan)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);
+
+	/* populate channel configurations */
+	for_each_available_child_of_node(of_node, child) {
+		struct mhi_chan *mhi_chan;
+
+		if (strcmp(child->name, "mhi_chan"))
+			continue;
+
+		ret = of_property_read_u32(child, "reg", &chan);
+		if (ret || chan >= mhi_cntrl->max_chan)
+			goto error_chan_cfg;
+
+		mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+		ret = of_property_read_string(child, "label",
+					      &mhi_chan->name);
+		if (ret)
+			goto error_chan_cfg;
+
+		mhi_chan->chan = chan;
+
+		ret = of_property_read_u32(child, "mhi,num-elements",
+					   (u32 *)&mhi_chan->tre_ring.elements);
+		if (!ret && !mhi_chan->tre_ring.elements)
+			goto error_chan_cfg;
+
+		/*
+		 * For some channels, the local ring length should be larger
+		 * than the transfer ring length, due to internal logical
+		 * channels in the device; this lets the host queue more
+		 * buffers than the transfer ring length allows. For example,
+		 * RSC channels should have a local ring larger than the
+		 * transfer ring.
+		 */
+		ret = of_property_read_u32(child, "mhi,local-elements",
+					   (u32 *)&mhi_chan->buf_ring.elements);
+		if (ret)
+			mhi_chan->buf_ring.elements =
+				mhi_chan->tre_ring.elements;
+
+		ret = of_property_read_u32(child, "mhi,event-ring",
+					   &mhi_chan->er_index);
+		if (ret)
+			goto error_chan_cfg;
+
+		ret = of_property_read_u32(child, "mhi,chan-dir",
+					   &mhi_chan->dir);
+		if (ret)
+			goto error_chan_cfg;
+
+		/*
+		 * For most channels, chtype is identical to the channel
+		 * direction; if chtype is not defined, assign the channel
+		 * direction to it
+		 */
+		ret = of_property_read_u32(child, "mhi,chan-type",
+					   &mhi_chan->type);
+		if (ret)
+			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
+
+		ret = of_property_read_u32(child, "mhi,ee", &mhi_chan->ee_mask);
+		if (ret)
+			goto error_chan_cfg;
+
+		of_property_read_u32(child, "mhi,pollcfg",
+				     &mhi_chan->db_cfg.pollcfg);
+
+		ret = of_property_read_u32(child, "mhi,data-type",
+					   &mhi_chan->xfer_type);
+		if (ret)
+			goto error_chan_cfg;
+
+		switch (mhi_chan->xfer_type) {
+		case MHI_XFER_BUFFER:
+			mhi_chan->gen_tre = mhi_gen_tre;
+			mhi_chan->queue_xfer = mhi_queue_buf;
+			break;
+		case MHI_XFER_SKB:
+			mhi_chan->queue_xfer = mhi_queue_skb;
+			break;
+		case MHI_XFER_SCLIST:
+			mhi_chan->gen_tre = mhi_gen_tre;
+			mhi_chan->queue_xfer = mhi_queue_sclist;
+			break;
+		case MHI_XFER_NOP:
+			mhi_chan->queue_xfer = mhi_queue_nop;
+			break;
+		case MHI_XFER_DMA:
+		case MHI_XFER_RSC_DMA:
+			mhi_chan->queue_xfer = mhi_queue_dma;
+			break;
+		default:
+			goto error_chan_cfg;
+		}
+
+		mhi_chan->lpm_notify = of_property_read_bool(child,
+							     "mhi,lpm-notify");
+		mhi_chan->offload_ch = of_property_read_bool(child,
+							"mhi,offload-chan");
+		mhi_chan->db_cfg.reset_req = of_property_read_bool(child,
+							"mhi,db-mode-switch");
+		mhi_chan->pre_alloc = of_property_read_bool(child,
+							    "mhi,auto-queue");
+		mhi_chan->auto_start = of_property_read_bool(child,
+							     "mhi,auto-start");
+		mhi_chan->wake_capable = of_property_read_bool(child,
+							"mhi,wake-capable");
+
+		if (mhi_chan->pre_alloc &&
+		    (mhi_chan->dir != DMA_FROM_DEVICE ||
+		     mhi_chan->xfer_type != MHI_XFER_BUFFER))
+			goto error_chan_cfg;
+
+		/* bi-dir and directionless channels must be offload channels */
+		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
+		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch)
+			goto error_chan_cfg;
+
+		/* if the mhi host allocates the buffers, the client cannot queue */
+		if (mhi_chan->pre_alloc)
+			mhi_chan->queue_xfer = mhi_queue_nop;
+
+		if (!mhi_chan->offload_ch) {
+			ret = of_property_read_u32(child, "mhi,doorbell-mode",
+						   &mhi_chan->db_cfg.brstmode);
+			if (ret ||
+			    MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode))
+				goto error_chan_cfg;
+
+			mhi_chan->db_cfg.process_db =
+				(mhi_chan->db_cfg.brstmode ==
+				 MHI_BRSTMODE_ENABLE) ?
+				mhi_db_brstmode : mhi_db_brstmode_disable;
+		}
+
+		mhi_chan->configured = true;
+
+		if (mhi_chan->lpm_notify)
+			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
+	}
+
+	return 0;
+
+error_chan_cfg:
+	vfree(mhi_cntrl->mhi_chan);
+
+	return -EINVAL;
+}
+
+static int of_parse_dt(struct mhi_controller *mhi_cntrl,
+		       struct device_node *of_node)
+{
+	int ret;
+	enum mhi_ee i;
+	u32 *ee;
+	u32 bhie_offset;
+
+	/* parse MHI channel configuration */
+	ret = of_parse_ch_cfg(mhi_cntrl, of_node);
+	if (ret)
+		return ret;
+
+	/* parse MHI event configuration */
+	ret = of_parse_ev_cfg(mhi_cntrl, of_node);
+	if (ret)
+		goto error_ev_cfg;
+
+	ret = of_property_read_u32(of_node, "mhi,timeout",
+				   &mhi_cntrl->timeout_ms);
+	if (ret)
+		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
+
+	mhi_cntrl->bounce_buf = of_property_read_bool(of_node, "mhi,use-bb");
+	ret = of_property_read_u32(of_node, "mhi,buffer-len",
+				   (u32 *)&mhi_cntrl->buffer_len);
+	if (ret)
+		mhi_cntrl->buffer_len = MHI_MAX_MTU;
+
+	/* by default the host is allowed to ring the DB in both M0 and M2 states */
+	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
+	if (of_property_read_bool(of_node, "mhi,m2-no-db-access"))
+		mhi_cntrl->db_access &= ~MHI_PM_M2;
+
+	/* parse the device ee table */
+	for (i = MHI_EE_PBL, ee = mhi_cntrl->ee_table; i < MHI_EE_MAX;
+	     i++, ee++) {
+		/* setup the default ee before checking for override */
+		*ee = i;
+		ret = of_property_match_string(of_node, "mhi,ee-names",
+					       mhi_ee_str[i]);
+		if (ret < 0)
+			continue;
+
+		of_property_read_u32_index(of_node, "mhi,ee", ret, ee);
+	}
+
+	ret = of_property_read_u32(of_node, "mhi,bhie-offset", &bhie_offset);
+	if (!ret)
+		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_offset;
+
+	return 0;
+
+error_ev_cfg:
+	kfree(mhi_cntrl->mhi_chan);
+
+	return ret;
+}
+
+int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+	int i;
+	struct mhi_event *mhi_event;
+	struct mhi_chan *mhi_chan;
+	struct mhi_cmd *mhi_cmd;
+	struct mhi_device *mhi_dev;
+	u32 soc_info;
+
+	if (!mhi_cntrl->of_node)
+		return -EINVAL;
+
+	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put)
+		return -EINVAL;
+
+	if (!mhi_cntrl->status_cb || !mhi_cntrl->link_status)
+		return -EINVAL;
+
+	ret = of_parse_dt(mhi_cntrl, mhi_cntrl->of_node);
+	if (ret)
+		return -EINVAL;
+
+	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
+				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+	if (!mhi_cntrl->mhi_cmd) {
+		ret = -ENOMEM;
+		goto error_alloc_cmd;
+	}
+
+	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
+	mutex_init(&mhi_cntrl->pm_mutex);
+	rwlock_init(&mhi_cntrl->pm_lock);
+	spin_lock_init(&mhi_cntrl->transition_lock);
+	spin_lock_init(&mhi_cntrl->wlock);
+	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
+	INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
+	INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
+	INIT_WORK(&mhi_cntrl->low_priority_worker, mhi_low_priority_worker);
+	init_waitqueue_head(&mhi_cntrl->state_event);
+
+	mhi_cmd = mhi_cntrl->mhi_cmd;
+	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
+		spin_lock_init(&mhi_cmd->lock);
+
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		if (mhi_event->offload_ev)
+			continue;
+
+		mhi_event->mhi_cntrl = mhi_cntrl;
+		spin_lock_init(&mhi_event->lock);
+
+		if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
+			continue;
+
+		if (mhi_event->data_type == MHI_ER_CTRL_ELEMENT_TYPE)
+			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
+				     (ulong)mhi_event);
+		else
+			tasklet_init(&mhi_event->task, mhi_ev_task,
+				     (ulong)mhi_event);
+	}
+
+	mhi_chan = mhi_cntrl->mhi_chan;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+		mutex_init(&mhi_chan->mutex);
+		init_completion(&mhi_chan->completion);
+		rwlock_init(&mhi_chan->lock);
+	}
+
+	if (mhi_cntrl->bounce_buf) {
+		mhi_cntrl->map_single = mhi_map_single_use_bb;
+		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
+	} else {
+		mhi_cntrl->map_single = mhi_map_single_no_bb;
+		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
+	}
+
+	/* read the device info if possible */
+	if (mhi_cntrl->regs) {
+		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
+				   SOC_HW_VERSION_OFFS, &soc_info);
+		if (ret)
+			goto error_alloc_dev;
+
+		mhi_cntrl->family_number =
+			(soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
+			SOC_HW_VERSION_FAM_NUM_SHFT;
+		mhi_cntrl->device_number =
+			(soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
+			SOC_HW_VERSION_DEV_NUM_SHFT;
+		mhi_cntrl->major_version =
+			(soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
+			SOC_HW_VERSION_MAJOR_VER_SHFT;
+		mhi_cntrl->minor_version =
+			(soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
+			SOC_HW_VERSION_MINOR_VER_SHFT;
+	}
+
+	/* register controller with mhi_bus */
+	mhi_dev = mhi_alloc_device(mhi_cntrl);
+	if (!mhi_dev) {
+		ret = -ENOMEM;
+		goto error_alloc_dev;
+	}
+
+	mhi_dev->dev_type = MHI_CONTROLLER_TYPE;
+	mhi_dev->mhi_cntrl = mhi_cntrl;
+	dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u", mhi_dev->dev_id,
+		     mhi_dev->domain, mhi_dev->bus, mhi_dev->slot);
+
+	/* init wake source */
+	device_init_wakeup(&mhi_dev->dev, true);
+
+	ret = device_add(&mhi_dev->dev);
+	if (ret)
+		goto error_add_dev;
+
+	mhi_cntrl->mhi_dev = mhi_dev;
+
+	mhi_cntrl->parent = mhi_parent;
+
+	mhi_cntrl->klog_lvl = MHI_MSG_LVL_ERROR;
+
+	/* add it to this list only for debug purposes */
+	mutex_lock(&mhi_bus.lock);
+	list_add_tail(&mhi_cntrl->node, &mhi_bus.controller_list);
+	mutex_unlock(&mhi_bus.lock);
+
+	return 0;
+
+error_add_dev:
+	mhi_dealloc_device(mhi_cntrl, mhi_dev);
+
+error_alloc_dev:
+	kfree(mhi_cntrl->mhi_cmd);
+
+error_alloc_cmd:
+	kfree(mhi_cntrl->mhi_chan);
+	kfree(mhi_cntrl->mhi_event);
+
+	return ret;
+}
+EXPORT_SYMBOL(of_register_mhi_controller);
+
+void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+	kfree(mhi_cntrl->mhi_cmd);
+	kfree(mhi_cntrl->mhi_event);
+	kfree(mhi_cntrl->mhi_chan);
+	kfree(mhi_cntrl->mhi_tsync);
+
+	device_del(&mhi_dev->dev);
+	put_device(&mhi_dev->dev);
+
+	mutex_lock(&mhi_bus.lock);
+	list_del(&mhi_cntrl->node);
+	mutex_unlock(&mhi_bus.lock);
+}
+EXPORT_SYMBOL(mhi_unregister_mhi_controller);
+
+/* set pointer to controller private data */
+static inline void mhi_controller_set_devdata(struct mhi_controller *mhi_cntrl,
+					 void *priv)
+{
+	mhi_cntrl->priv_data = priv;
+}
+
+/*
+ * allocate an mhi controller to register; @size bytes of caller private
+ * data are carved out of the same allocation, directly after the
+ * controller struct
+ */
+struct mhi_controller *mhi_alloc_controller(size_t size)
+{
+	struct mhi_controller *mhi_cntrl;
+
+	mhi_cntrl = kzalloc(size + sizeof(*mhi_cntrl), GFP_KERNEL);
+
+	if (mhi_cntrl && size)
+		mhi_controller_set_devdata(mhi_cntrl, mhi_cntrl + 1);
+
+	return mhi_cntrl;
+}
+EXPORT_SYMBOL(mhi_alloc_controller);
+
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+	u32 bhie_off;
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	ret = mhi_init_dev_ctxt(mhi_cntrl);
+	if (ret) {
+		MHI_ERR("Error with init dev_ctxt\n");
+		goto error_dev_ctxt;
+	}
+
+	/*
+	 * allocate the rddm table if specified; this table is only for
+	 * debug purposes, so we'll ignore errors
+	 */
+	if (mhi_cntrl->rddm_size) {
+		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
+				     mhi_cntrl->rddm_size);
+
+		/*
+		 * This controller supports rddm, so we need to manually
+		 * clear the BHIE RX registers since power-on reset values
+		 * are undefined.
+		 */
+		if (!mhi_cntrl->bhie) {
+			ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
+					   &bhie_off);
+			if (ret) {
+				MHI_ERR("Error getting bhie offset\n");
+				goto bhie_error;
+			}
+
+			mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
+		}
+
+		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, 0,
+			  BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + 4);
+
+		if (mhi_cntrl->rddm_image)
+			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+	}
+
+	mhi_cntrl->pre_init = true;
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return 0;
+
+bhie_error:
+	if (mhi_cntrl->rddm_image) {
+		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+		mhi_cntrl->rddm_image = NULL;
+	}
+
+error_dev_ctxt:
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_prepare_for_power_up);
+
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
+{
+	if (mhi_cntrl->fbc_image) {
+		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+		mhi_cntrl->fbc_image = NULL;
+	}
+
+	if (mhi_cntrl->rddm_image) {
+		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+		mhi_cntrl->rddm_image = NULL;
+	}
+
+	mhi_deinit_dev_ctxt(mhi_cntrl);
+	mhi_cntrl->pre_init = false;
+}
+EXPORT_SYMBOL(mhi_unprepare_after_power_down);
+
+/* match dev to drv */
+static int mhi_match(struct device *dev, struct device_driver *drv)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+	const struct mhi_device_id *id;
+
+	/* controller-type devices have no client driver associated with them */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE)
+		return 0;
+
+	for (id = mhi_drv->id_table; id->chan[0]; id++)
+		if (!strcmp(mhi_dev->chan_name, id->chan)) {
+			mhi_dev->id = id;
+			return 1;
+		}
+
+	return 0;
+}
+
+static void mhi_release_device(struct device *dev)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+	if (mhi_dev->ul_chan)
+		mhi_dev->ul_chan->mhi_dev = NULL;
+
+	if (mhi_dev->dl_chan)
+		mhi_dev->dl_chan->mhi_dev = NULL;
+
+	kfree(mhi_dev);
+}
+
+struct bus_type mhi_bus_type = {
+	.name = "mhi",
+	.dev_name = "mhi",
+	.match = mhi_match,
+};
+
+static int mhi_driver_probe(struct device *dev)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct device_driver *drv = dev->driver;
+	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+	struct mhi_event *mhi_event;
+	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
+	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
+	int ret;
+
+	/* bring device out of lpm */
+	ret = mhi_device_get_sync(mhi_dev, MHI_VOTE_DEVICE);
+	if (ret)
+		return ret;
+
+	ret = -EINVAL;
+	if (ul_chan) {
+		/* lpm notification requires status_cb */
+		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
+			goto exit_probe;
+
+		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
+			goto exit_probe;
+
+		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+		mhi_dev->status_cb = mhi_drv->status_cb;
+		if (ul_chan->auto_start) {
+			ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
+			if (ret)
+				goto exit_probe;
+		}
+	}
+
+	if (dl_chan) {
+		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
+			goto exit_probe;
+
+		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
+			goto exit_probe;
+
+		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
+
+		/*
+		 * if this channel's event ring is managed by the client,
+		 * status_cb must be defined so we can send an async
+		 * callback whenever there is pending data
+		 */
+		if (mhi_event->cl_manage && !mhi_drv->status_cb)
+			goto exit_probe;
+
+		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+
+		/* ul & dl use the same status cb */
+		mhi_dev->status_cb = mhi_drv->status_cb;
+	}
+
+	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
+	if (ret)
+		goto exit_probe;
+
+	if (dl_chan && dl_chan->auto_start)
+		mhi_prepare_channel(mhi_cntrl, dl_chan);
+
+	mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+
+	return ret;
+
+exit_probe:
+	mhi_unprepare_from_transfer(mhi_dev);
+
+	mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+
+	return ret;
+}
+
+static int mhi_driver_remove(struct device *dev)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan;
+	enum MHI_CH_STATE ch_state[] = {
+		MHI_CH_STATE_DISABLED,
+		MHI_CH_STATE_DISABLED
+	};
+	int dir;
+
+	/* a controller device has no work to do */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE)
+		return 0;
+
+	MHI_LOG("Removing device for chan:%s\n", mhi_dev->chan_name);
+
+	/* reset both channels */
+	for (dir = 0; dir < 2; dir++) {
+		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		/* wake all threads waiting for completion */
+		write_lock_irq(&mhi_chan->lock);
+		mhi_chan->ccs = MHI_EV_CC_INVALID;
+		complete_all(&mhi_chan->completion);
+		write_unlock_irq(&mhi_chan->lock);
+
+		/* suspend the channel so no more processing occurs */
+		mutex_lock(&mhi_chan->mutex);
+		write_lock_irq(&mhi_chan->lock);
+		ch_state[dir] = mhi_chan->ch_state;
+		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
+		write_unlock_irq(&mhi_chan->lock);
+
+		/* reset the channel */
+		if (!mhi_chan->offload_ch)
+			mhi_reset_chan(mhi_cntrl, mhi_chan);
+
+		mutex_unlock(&mhi_chan->mutex);
+	}
+
+	/* destroy the device */
+	mhi_drv->remove(mhi_dev);
+
+	/* de-init the channel if it was enabled */
+	for (dir = 0; dir < 2; dir++) {
+		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		mutex_lock(&mhi_chan->mutex);
+
+		if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
+		    !mhi_chan->offload_ch)
+			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+
+		mutex_unlock(&mhi_chan->mutex);
+	}
+
+	if (mhi_cntrl->tsync_dev == mhi_dev)
+		mhi_cntrl->tsync_dev = NULL;
+
+	/* relinquish any pending votes for device */
+	while (atomic_read(&mhi_dev->dev_vote))
+		mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+
+	/* remove pending votes for the bus */
+	while (atomic_read(&mhi_dev->bus_vote))
+		mhi_device_put(mhi_dev, MHI_VOTE_BUS);
+
+	return 0;
+}
+
+int mhi_driver_register(struct mhi_driver *mhi_drv)
+{
+	struct device_driver *driver = &mhi_drv->driver;
+
+	if (!mhi_drv->probe || !mhi_drv->remove)
+		return -EINVAL;
+
+	driver->bus = &mhi_bus_type;
+	driver->probe = mhi_driver_probe;
+	driver->remove = mhi_driver_remove;
+	return driver_register(driver);
+}
+EXPORT_SYMBOL(mhi_driver_register);
+
+void mhi_driver_unregister(struct mhi_driver *mhi_drv)
+{
+	driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL(mhi_driver_unregister);
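+
+/*
+ * Minimal client driver usage sketch (illustrative only; the my_* names
+ * and the "EXAMPLE" channel are hypothetical):
+ *
+ *	static const struct mhi_device_id my_id_table[] = {
+ *		{ .chan = "EXAMPLE" },
+ *		{},
+ *	};
+ *
+ *	static struct mhi_driver my_driver = {
+ *		.id_table = my_id_table,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *		.ul_xfer_cb = my_ul_cb,
+ *		.dl_xfer_cb = my_dl_cb,
+ *		.driver = { .name = "my_mhi_client" },
+ *	};
+ *
+ *	module_driver(my_driver, mhi_driver_register, mhi_driver_unregister);
+ */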
+
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_device *mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+	struct device *dev;
+
+	if (!mhi_dev)
+		return NULL;
+
+	dev = &mhi_dev->dev;
+	device_initialize(dev);
+	dev->bus = &mhi_bus_type;
+	dev->release = mhi_release_device;
+	dev->parent = mhi_cntrl->dev;
+	mhi_dev->mhi_cntrl = mhi_cntrl;
+	mhi_dev->dev_id = mhi_cntrl->dev_id;
+	mhi_dev->domain = mhi_cntrl->domain;
+	mhi_dev->bus = mhi_cntrl->bus;
+	mhi_dev->slot = mhi_cntrl->slot;
+	mhi_dev->mtu = MHI_MAX_MTU;
+	atomic_set(&mhi_dev->dev_vote, 0);
+	atomic_set(&mhi_dev->bus_vote, 0);
+
+	return mhi_dev;
+}
+
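+/*
+ * registered at postcore init so the bus and the debugfs root exist
+ * before any controller or client drivers probe
+ */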
+static int __init mhi_init(void)
+{
+	int ret;
+
+	mutex_init(&mhi_bus.lock);
+	INIT_LIST_HEAD(&mhi_bus.controller_list);
+
+	/* parent directory */
+	mhi_parent = debugfs_create_dir(mhi_bus_type.name, NULL);
+
+	ret = bus_register(&mhi_bus_type);
+	if (!ret)
+		mhi_dtr_init();
+
+	return ret;
+}
+postcore_initcall(mhi_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("MHI_CORE");
+MODULE_DESCRIPTION("MHI Host Interface");
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
new file mode 100644
index 0000000..d0bc74b
--- /dev/null
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -0,0 +1,900 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MHI_INT_H
+#define _MHI_INT_H
+
+extern struct bus_type mhi_bus_type;
+
+/* MHI mmio register mapping */
+#define PCI_INVALID_READ(val) (val == U32_MAX)
+#define MHI_REG_SIZE (SZ_4K)
+
+#define MHIREGLEN (0x0)
+#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
+#define MHIREGLEN_MHIREGLEN_SHIFT (0)
+
+#define MHIVER (0x8)
+#define MHIVER_MHIVER_MASK (0xFFFFFFFF)
+#define MHIVER_MHIVER_SHIFT (0)
+
+#define MHICFG (0x10)
+#define MHICFG_NHWER_MASK (0xFF000000)
+#define MHICFG_NHWER_SHIFT (24)
+#define MHICFG_NER_MASK (0xFF0000)
+#define MHICFG_NER_SHIFT (16)
+#define MHICFG_NHWCH_MASK (0xFF00)
+#define MHICFG_NHWCH_SHIFT (8)
+#define MHICFG_NCH_MASK (0xFF)
+#define MHICFG_NCH_SHIFT (0)
+
+#define CHDBOFF (0x18)
+#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF)
+#define CHDBOFF_CHDBOFF_SHIFT (0)
+
+#define ERDBOFF (0x20)
+#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF)
+#define ERDBOFF_ERDBOFF_SHIFT (0)
+
+#define BHIOFF (0x28)
+#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF)
+#define BHIOFF_BHIOFF_SHIFT (0)
+
+#define BHIEOFF (0x2C)
+#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF)
+#define BHIEOFF_BHIEOFF_SHIFT (0)
+
+#define DEBUGOFF (0x30)
+#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF)
+#define DEBUGOFF_DEBUGOFF_SHIFT (0)
+
+#define MHICTRL (0x38)
+#define MHICTRL_MHISTATE_MASK (0x0000FF00)
+#define MHICTRL_MHISTATE_SHIFT (8)
+#define MHICTRL_RESET_MASK (0x2)
+#define MHICTRL_RESET_SHIFT (1)
+
+#define MHISTATUS (0x48)
+#define MHISTATUS_MHISTATE_MASK (0x0000FF00)
+#define MHISTATUS_MHISTATE_SHIFT (8)
+#define MHISTATUS_SYSERR_MASK (0x4)
+#define MHISTATUS_SYSERR_SHIFT (2)
+#define MHISTATUS_READY_MASK (0x1)
+#define MHISTATUS_READY_SHIFT (0)
+
+#define CCABAP_LOWER (0x58)
+#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF)
+#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0)
+
+#define CCABAP_HIGHER (0x5C)
+#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF)
+#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0)
+
+#define ECABAP_LOWER (0x60)
+#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF)
+#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0)
+
+#define ECABAP_HIGHER (0x64)
+#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF)
+#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0)
+
+#define CRCBAP_LOWER (0x68)
+#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF)
+#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0)
+
+#define CRCBAP_HIGHER (0x6C)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0)
+
+#define CRDB_LOWER (0x70)
+#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF)
+#define CRDB_LOWER_CRDB_LOWER_SHIFT (0)
+
+#define CRDB_HIGHER (0x74)
+#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF)
+#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0)
+
+#define MHICTRLBASE_LOWER (0x80)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0)
+
+#define MHICTRLBASE_HIGHER (0x84)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0)
+
+#define MHICTRLLIMIT_LOWER (0x88)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0)
+
+#define MHICTRLLIMIT_HIGHER (0x8C)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0)
+
+#define MHIDATABASE_LOWER (0x98)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0)
+
+#define MHIDATABASE_HIGHER (0x9C)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0)
+
+#define MHIDATALIMIT_LOWER (0xA0)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0)
+
+#define MHIDATALIMIT_HIGHER (0xA4)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0)
+
+/* Host request register */
+#define MHI_SOC_RESET_REQ_OFFSET (0xB0)
+#define MHI_SOC_RESET_REQ BIT(0)
+
+/* MHI misc capability registers */
+#define MISC_OFFSET (0x24)
+#define MISC_CAP_MASK (0xFFFFFFFF)
+#define MISC_CAP_SHIFT (0)
+
+#define CAP_CAPID_MASK (0xFF000000)
+#define CAP_CAPID_SHIFT (24)
+#define CAP_NEXT_CAP_MASK (0x00FFF000)
+#define CAP_NEXT_CAP_SHIFT (12)
+
+/* MHI Timesync offsets */
+#define TIMESYNC_CFG_OFFSET (0x00)
+#define TIMESYNC_CFG_CAPID_MASK (CAP_CAPID_MASK)
+#define TIMESYNC_CFG_CAPID_SHIFT (CAP_CAPID_SHIFT)
+#define TIMESYNC_CFG_NEXT_OFF_MASK (CAP_NEXT_CAP_MASK)
+#define TIMESYNC_CFG_NEXT_OFF_SHIFT (CAP_NEXT_CAP_SHIFT)
+#define TIMESYNC_CFG_NUMCMD_MASK (0xFF)
+#define TIMESYNC_CFG_NUMCMD_SHIFT (0)
+#define TIMESYNC_DB_OFFSET (0x4)
+#define TIMESYNC_TIME_LOW_OFFSET (0x8)
+#define TIMESYNC_TIME_HIGH_OFFSET (0xC)
+
+#define TIMESYNC_CAP_ID (2)
+
+/* MHI Bandwidth scaling offsets */
+#define BW_SCALE_CFG_OFFSET (0x04)
+#define BW_SCALE_CFG_CHAN_DB_ID_MASK (0xFE000000)
+#define BW_SCALE_CFG_CHAN_DB_ID_SHIFT (25)
+#define BW_SCALE_CFG_ENABLED_MASK (0x01000000)
+#define BW_SCALE_CFG_ENABLED_SHIFT (24)
+#define BW_SCALE_CFG_ER_ID_MASK (0x00F80000)
+#define BW_SCALE_CFG_ER_ID_SHIFT (19)
+
+#define BW_SCALE_CAP_ID (3)
+
+/* MHI BHI offsets */
+#define BHI_BHIVERSION_MINOR (0x00)
+#define BHI_BHIVERSION_MAJOR (0x04)
+#define BHI_IMGADDR_LOW (0x08)
+#define BHI_IMGADDR_HIGH (0x0C)
+#define BHI_IMGSIZE (0x10)
+#define BHI_RSVD1 (0x14)
+#define BHI_IMGTXDB (0x18)
+#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHI_TXDB_SEQNUM_SHFT (0)
+#define BHI_RSVD2 (0x1C)
+#define BHI_INTVEC (0x20)
+#define BHI_RSVD3 (0x24)
+#define BHI_EXECENV (0x28)
+#define BHI_STATUS (0x2C)
+#define BHI_ERRCODE (0x30)
+#define BHI_ERRDBG1 (0x34)
+#define BHI_ERRDBG2 (0x38)
+#define BHI_ERRDBG3 (0x3C)
+#define BHI_SERIALNU (0x40)
+#define BHI_SBLANTIROLLVER (0x44)
+#define BHI_NUMSEG (0x48)
+#define BHI_MSMHWID(n) (0x4C + (0x4 * n))
+#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n))
+#define BHI_RSVD5 (0xC4)
+#define BHI_STATUS_MASK (0xC0000000)
+#define BHI_STATUS_SHIFT (30)
+#define BHI_STATUS_ERROR (3)
+#define BHI_STATUS_SUCCESS (2)
+#define BHI_STATUS_RESET (0)
+
+/* MHI BHIE offsets */
+#define BHIE_MSMSOCID_OFFS (0x0000)
+#define BHIE_TXVECADDR_LOW_OFFS (0x002C)
+#define BHIE_TXVECADDR_HIGH_OFFS (0x0030)
+#define BHIE_TXVECSIZE_OFFS (0x0034)
+#define BHIE_TXVECDB_OFFS (0x003C)
+#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECDB_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_OFFS (0x0044)
+#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_TXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
+#define BHIE_RXVECADDR_LOW_OFFS (0x0060)
+#define BHIE_RXVECADDR_HIGH_OFFS (0x0064)
+#define BHIE_RXVECSIZE_OFFS (0x0068)
+#define BHIE_RXVECDB_OFFS (0x0070)
+#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECDB_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_OFFS (0x0078)
+#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_RXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_RXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)
+
+#define SOC_HW_VERSION_OFFS (0x224)
+#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000)
+#define SOC_HW_VERSION_FAM_NUM_SHFT (28)
+#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000)
+#define SOC_HW_VERSION_DEV_NUM_SHFT (16)
+#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00)
+#define SOC_HW_VERSION_MAJOR_VER_SHFT (8)
+#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF)
+#define SOC_HW_VERSION_MINOR_VER_SHFT (0)
+
+/* timesync time calculations */
+#define LOCAL_TICKS_TO_US(x) (div_u64((x) * 100ULL, \
+				div_u64(mhi_cntrl->local_timer_freq, 10000ULL)))
+#define REMOTE_TICKS_TO_US(x) (div_u64((x) * 100ULL, \
+			       div_u64(mhi_cntrl->remote_timer_freq, 10000ULL)))
+
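+/*
+ * Shared-memory context structures read by the device. The layout must
+ * match the device's context definitions exactly, hence the packed,
+ * 4-byte-aligned 64-bit ring pointer members.
+ */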
+struct mhi_event_ctxt {
+	u32 reserved : 8;
+	u32 intmodc : 8;
+	u32 intmodt : 16;
+	u32 ertype;
+	u32 msivec;
+
+	u64 rbase __packed __aligned(4);
+	u64 rlen __packed __aligned(4);
+	u64 rp __packed __aligned(4);
+	u64 wp __packed __aligned(4);
+};
+
+struct mhi_chan_ctxt {
+	u32 chstate : 8;
+	u32 brstmode : 2;
+	u32 pollcfg : 6;
+	u32 reserved : 16;
+	u32 chtype;
+	u32 erindex;
+
+	u64 rbase __packed __aligned(4);
+	u64 rlen __packed __aligned(4);
+	u64 rp __packed __aligned(4);
+	u64 wp __packed __aligned(4);
+};
+
+struct mhi_cmd_ctxt {
+	u32 reserved0;
+	u32 reserved1;
+	u32 reserved2;
+
+	u64 rbase __packed __aligned(4);
+	u64 rlen __packed __aligned(4);
+	u64 rp __packed __aligned(4);
+	u64 wp __packed __aligned(4);
+};
+
+struct mhi_tre {
+	u64 ptr;
+	u32 dword[2];
+};
+
+struct bhi_vec_entry {
+	u64 dma_addr;
+	u64 size;
+};
+
+enum mhi_cmd_type {
+	MHI_CMD_TYPE_NOP = 1,
+	MHI_CMD_TYPE_RESET = 16,
+	MHI_CMD_TYPE_STOP = 17,
+	MHI_CMD_TYPE_START = 18,
+	MHI_CMD_TYPE_TSYNC = 24,
+};
+
+/* no operation command */
+#define MHI_TRE_CMD_NOOP_PTR (0)
+#define MHI_TRE_CMD_NOOP_DWORD0 (0)
+#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_TYPE_NOP << 16)
+
+/* channel reset command */
+#define MHI_TRE_CMD_RESET_PTR (0)
+#define MHI_TRE_CMD_RESET_DWORD0 (0)
+#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
+					(MHI_CMD_TYPE_RESET << 16))
+
+/* channel stop command */
+#define MHI_TRE_CMD_STOP_PTR (0)
+#define MHI_TRE_CMD_STOP_DWORD0 (0)
+#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | (MHI_CMD_TYPE_STOP << 16))
+
+/* channel start command */
+#define MHI_TRE_CMD_START_PTR (0)
+#define MHI_TRE_CMD_START_DWORD0 (0)
+#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
+					(MHI_CMD_TYPE_START << 16))
+
+/* time sync cfg command */
+#define MHI_TRE_CMD_TSYNC_CFG_PTR (0)
+#define MHI_TRE_CMD_TSYNC_CFG_DWORD0 (0)
+#define MHI_TRE_CMD_TSYNC_CFG_DWORD1(er) ((MHI_CMD_TYPE_TSYNC << 16) | \
+					  (er << 24))
+
+#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
+
+/* event descriptor macros */
+#define MHI_TRE_EV_PTR(ptr) (ptr)
+#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len)
+#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16))
+#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF)
+#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
+#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_TSYNC_SEQ(tre) ((tre)->dword[0])
+#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
+#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
+#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF)
+#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF)
+#define MHI_TRE_GET_EV_BW_REQ_SEQ(tre) (((tre)->dword[0] >> 8) & 0xFF)
+
+/* transfer descriptor macros */
+#define MHI_TRE_DATA_PTR(ptr) (ptr)
+#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU)
+#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
+	| (ieot << 9) | (ieob << 8) | chain)
+
+/* rsc transfer descriptor macros */
+#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr)
+#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
+#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)
+
+enum MHI_CMD {
+	MHI_CMD_RESET_CHAN,
+	MHI_CMD_START_CHAN,
+	MHI_CMD_TIMSYNC_CFG,
+};
+
+enum MHI_PKT_TYPE {
+	MHI_PKT_TYPE_INVALID = 0x0,
+	MHI_PKT_TYPE_NOOP_CMD = 0x1,
+	MHI_PKT_TYPE_TRANSFER = 0x2,
+	MHI_PKT_TYPE_COALESCING = 0x8,
+	MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
+	MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
+	MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
+	MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
+	MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
+	MHI_PKT_TYPE_TX_EVENT = 0x22,
+	MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
+	MHI_PKT_TYPE_EE_EVENT = 0x40,
+	MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
+	MHI_PKT_TYPE_BW_REQ_EVENT = 0x50,
+	MHI_PKT_TYPE_STALE_EVENT, /* internal event */
+};
+
+/* MHI transfer completion events */
+enum MHI_EV_CCS {
+	MHI_EV_CC_INVALID = 0x0,
+	MHI_EV_CC_SUCCESS = 0x1,
+	MHI_EV_CC_EOT = 0x2,
+	MHI_EV_CC_OVERFLOW = 0x3,
+	MHI_EV_CC_EOB = 0x4,
+	MHI_EV_CC_OOB = 0x5,
+	MHI_EV_CC_DB_MODE = 0x6,
+	MHI_EV_CC_UNDEFINED_ERR = 0x10,
+	MHI_EV_CC_BAD_TRE = 0x11,
+};
+
+enum MHI_CH_STATE {
+	MHI_CH_STATE_DISABLED = 0x0,
+	MHI_CH_STATE_ENABLED = 0x1,
+	MHI_CH_STATE_RUNNING = 0x2,
+	MHI_CH_STATE_SUSPENDED = 0x3,
+	MHI_CH_STATE_STOP = 0x4,
+	MHI_CH_STATE_ERROR = 0x5,
+};
+
+enum MHI_BRSTMODE {
+	MHI_BRSTMODE_DISABLE = 0x2,
+	MHI_BRSTMODE_ENABLE = 0x3,
+};
+
+#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_BRSTMODE_DISABLE && \
+				    mode != MHI_BRSTMODE_ENABLE)
+
+extern const char * const mhi_ee_str[MHI_EE_MAX];
+#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
+			     "INVALID_EE" : mhi_ee_str[ee])
+
+#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
+			ee == MHI_EE_EDL)
+
+#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW)
+
+enum MHI_ST_TRANSITION {
+	MHI_ST_TRANSITION_PBL,
+	MHI_ST_TRANSITION_READY,
+	MHI_ST_TRANSITION_SBL,
+	MHI_ST_TRANSITION_MISSION_MODE,
+	MHI_ST_TRANSITION_MAX,
+};
+
+extern const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX];
+#define TO_MHI_STATE_TRANS_STR(state) (((state) >= MHI_ST_TRANSITION_MAX) ? \
+				"INVALID_STATE" : mhi_state_tran_str[state])
+
+extern const char * const mhi_state_str[MHI_STATE_MAX];
+#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \
+				  !mhi_state_str[state]) ? \
+				"INVALID_STATE" : mhi_state_str[state])
+
+enum {
+	MHI_PM_BIT_DISABLE,
+	MHI_PM_BIT_POR,
+	MHI_PM_BIT_M0,
+	MHI_PM_BIT_M2,
+	MHI_PM_BIT_M3_ENTER,
+	MHI_PM_BIT_M3,
+	MHI_PM_BIT_M3_EXIT,
+	MHI_PM_BIT_FW_DL_ERR,
+	MHI_PM_BIT_SYS_ERR_DETECT,
+	MHI_PM_BIT_SYS_ERR_PROCESS,
+	MHI_PM_BIT_SHUTDOWN_PROCESS,
+	MHI_PM_BIT_LD_ERR_FATAL_DETECT,
+	MHI_PM_BIT_MAX
+};
+
+/* internal power states */
+enum MHI_PM_STATE {
+	MHI_PM_DISABLE = BIT(MHI_PM_BIT_DISABLE), /* MHI is not enabled */
+	MHI_PM_POR = BIT(MHI_PM_BIT_POR), /* reset state */
+	MHI_PM_M0 = BIT(MHI_PM_BIT_M0),
+	MHI_PM_M2 = BIT(MHI_PM_BIT_M2),
+	MHI_PM_M3_ENTER = BIT(MHI_PM_BIT_M3_ENTER),
+	MHI_PM_M3 = BIT(MHI_PM_BIT_M3),
+	MHI_PM_M3_EXIT = BIT(MHI_PM_BIT_M3_EXIT),
+	/* firmware download failure state */
+	MHI_PM_FW_DL_ERR = BIT(MHI_PM_BIT_FW_DL_ERR),
+	MHI_PM_SYS_ERR_DETECT = BIT(MHI_PM_BIT_SYS_ERR_DETECT),
+	MHI_PM_SYS_ERR_PROCESS = BIT(MHI_PM_BIT_SYS_ERR_PROCESS),
+	MHI_PM_SHUTDOWN_PROCESS = BIT(MHI_PM_BIT_SHUTDOWN_PROCESS),
+	/* link not accessible */
+	MHI_PM_LD_ERR_FATAL_DETECT = BIT(MHI_PM_BIT_LD_ERR_FATAL_DETECT),
+};
+
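+/* pm states are one-hot, so a group of states can be tested with one mask */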
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
+		MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
+		MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
+#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
+					mhi_cntrl->db_access)
+#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+						MHI_PM_M2 | MHI_PM_M3_EXIT))
+#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
+#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
+#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
+					    MHI_PM_IN_ERROR_STATE(pm_state))
+#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
+					   (MHI_PM_M3_ENTER | MHI_PM_M3))
+
+/* accepted buffer type for the channel */
+enum MHI_XFER_TYPE {
+	MHI_XFER_BUFFER,
+	MHI_XFER_SKB,
+	MHI_XFER_SCLIST,
+	MHI_XFER_NOP, /* CPU offload channel, host does not accept transfer */
+	MHI_XFER_DMA, /* receive dma address, already mapped by client */
+	MHI_XFER_RSC_DMA, /* RSC type, accept premapped buffer */
+};
+
+#define NR_OF_CMD_RINGS (1)
+#define CMD_EL_PER_RING (128)
+#define PRIMARY_CMD_RING (0)
+#define MHI_BW_SCALE_CHAN_DB (126)
+#define MHI_DEV_WAKE_DB (127)
+#define MHI_MAX_MTU (0xffff)
+
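+/* pack the doorbell id, enable bit and event ring index into BW_SCALE_CFG */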
+#define MHI_BW_SCALE_SETUP(er_index) (((MHI_BW_SCALE_CHAN_DB << \
+	BW_SCALE_CFG_CHAN_DB_ID_SHIFT) & BW_SCALE_CFG_CHAN_DB_ID_MASK) | \
+	((1 << BW_SCALE_CFG_ENABLED_SHIFT) & BW_SCALE_CFG_ENABLED_MASK) | \
+	(((er_index) << BW_SCALE_CFG_ER_ID_SHIFT) & BW_SCALE_CFG_ER_ID_MASK))
+
+#define MHI_BW_SCALE_RESULT(status, seq) ((status & 0xF) << 8 | (seq & 0xFF))
+#define MHI_BW_SCALE_NACK 0xF
+
+enum MHI_ER_TYPE {
+	MHI_ER_TYPE_INVALID = 0x0,
+	MHI_ER_TYPE_VALID = 0x1,
+};
+
+enum mhi_er_priority {
+	MHI_ER_PRIORITY_HIGH,
+	MHI_ER_PRIORITY_MEDIUM,
+	MHI_ER_PRIORITY_LOW,
+};
+
+#define IS_MHI_ER_PRIORITY_LOW(ev) (ev->priority >= MHI_ER_PRIORITY_LOW)
+#define IS_MHI_ER_PRIORITY_HIGH(ev) (ev->priority == MHI_ER_PRIORITY_HIGH)
+
+enum mhi_er_data_type {
+	MHI_ER_DATA_ELEMENT_TYPE,
+	MHI_ER_CTRL_ELEMENT_TYPE,
+	MHI_ER_TSYNC_ELEMENT_TYPE,
+	MHI_ER_BW_SCALE_ELEMENT_TYPE,
+	MHI_ER_DATA_TYPE_MAX = MHI_ER_BW_SCALE_ELEMENT_TYPE,
+};
+
+enum mhi_ch_ee_mask {
+	MHI_CH_EE_PBL = BIT(MHI_EE_PBL),
+	MHI_CH_EE_SBL = BIT(MHI_EE_SBL),
+	MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS),
+	MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM),
+	MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU),
+	MHI_CH_EE_WFW = BIT(MHI_EE_WFW),
+	MHI_CH_EE_EDL = BIT(MHI_EE_EDL),
+};
+
+enum mhi_ch_type {
+	MHI_CH_TYPE_INVALID = 0,
+	MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE,
+	MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE,
+	MHI_CH_TYPE_INBOUND_COALESCED = 3,
+};
+
+struct db_cfg {
+	bool reset_req;
+	bool db_mode;
+	u32 pollcfg;
+	enum MHI_BRSTMODE brstmode;
+	dma_addr_t db_val;
+	void (*process_db)(struct mhi_controller *mhi_cntrl,
+			   struct db_cfg *db_cfg, void __iomem *io_addr,
+			   dma_addr_t db_val);
+};
+
+struct mhi_pm_transitions {
+	enum MHI_PM_STATE from_state;
+	u32 to_states;
+};
+
+struct state_transition {
+	struct list_head node;
+	enum MHI_ST_TRANSITION state;
+};
+
+struct mhi_ctxt {
+	struct mhi_event_ctxt *er_ctxt;
+	struct mhi_chan_ctxt *chan_ctxt;
+	struct mhi_cmd_ctxt *cmd_ctxt;
+	dma_addr_t er_ctxt_addr;
+	dma_addr_t chan_ctxt_addr;
+	dma_addr_t cmd_ctxt_addr;
+};
+
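+/*
+ * Generic ring descriptor shared by transfer, event and command rings.
+ * base/rp/wp are host virtual addresses, iommu_base is the DMA address
+ * of base, and ctxt_wp points into the shared context so the device can
+ * observe write pointer updates.
+ */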
+struct mhi_ring {
+	dma_addr_t dma_handle;
+	dma_addr_t iommu_base;
+	u64 *ctxt_wp; /* points to the shared ctxt wp */
+	void *pre_aligned;
+	void *base;
+	void *rp;
+	void *wp;
+	size_t el_size;
+	size_t len;
+	size_t elements;
+	size_t alloc_size;
+	void __iomem *db_addr;
+};
+
+struct mhi_cmd {
+	struct mhi_ring ring;
+	spinlock_t lock;
+};
+
+struct mhi_buf_info {
+	dma_addr_t p_addr;
+	void *v_addr;
+	void *bb_addr;
+	void *wp;
+	size_t len;
+	void *cb_buf;
+	bool used; /* set when the element is in use */
+	bool pre_mapped; /* already pre-mapped by client */
+	enum dma_data_direction dir;
+};
+
+struct mhi_event {
+	struct list_head node;
+	u32 er_index;
+	u32 intmod;
+	u32 msi;
+	int chan; /* this event ring is dedicated to a channel */
+	enum mhi_er_priority priority;
+	enum mhi_er_data_type data_type;
+	struct mhi_ring ring;
+	struct db_cfg db_cfg;
+	bool hw_ring;
+	bool cl_manage;
+	bool offload_ev; /* managed by a device driver */
+	bool request_irq; /* has dedicated interrupt handler */
+	spinlock_t lock;
+	struct mhi_chan *mhi_chan; /* dedicated to channel */
+	struct tasklet_struct task;
+	int (*process_event)(struct mhi_controller *mhi_cntrl,
+			     struct mhi_event *mhi_event,
+			     u32 event_quota);
+	struct mhi_controller *mhi_cntrl;
+};
+
+struct mhi_chan {
+	u32 chan;
+	const char *name;
+	/*
+	 * important: when consuming, increment tre_ring first; when
+	 * releasing, decrement buf_ring first. If tre_ring has space,
+	 * buf_ring is guaranteed to have space, so we do not need to
+	 * check both rings.
+	 */
+	struct mhi_ring buf_ring;
+	struct mhi_ring tre_ring;
+	u32 er_index;
+	u32 intmod;
+	enum mhi_ch_type type;
+	enum dma_data_direction dir;
+	struct db_cfg db_cfg;
+	u32 ee_mask;
+	enum MHI_XFER_TYPE xfer_type;
+	enum MHI_CH_STATE ch_state;
+	enum MHI_EV_CCS ccs;
+	bool lpm_notify;
+	bool configured;
+	bool offload_ch;
+	bool pre_alloc;
+	bool auto_start;
+	bool wake_capable; /* channel should wake up system */
+	/* functions that generate the transfer ring elements */
+	int (*gen_tre)(struct mhi_controller *, struct mhi_chan *, void *,
+		       void *, size_t, enum MHI_FLAGS);
+	int (*queue_xfer)(struct mhi_device *, struct mhi_chan *, void *,
+			  size_t, enum MHI_FLAGS);
+	/* xfer call back */
+	struct mhi_device *mhi_dev;
+	void (*xfer_cb)(struct mhi_device *, struct mhi_result *);
+	struct mutex mutex;
+	struct completion completion;
+	rwlock_t lock;
+	struct list_head node;
+};
+
+struct tsync_node {
+	struct list_head node;
+	u32 sequence;
+	u64 local_time;
+	u64 remote_time;
+	struct mhi_device *mhi_dev;
+	void (*cb_func)(struct mhi_device *mhi_dev, u32 sequence,
+			u64 local_time, u64 remote_time);
+};
+
+struct mhi_timesync {
+	u32 er_index;
+	void __iomem *db;
+	void __iomem *time_reg;
+	enum MHI_EV_CCS ccs;
+	struct completion completion;
+	spinlock_t lock; /* list protection */
+	struct mutex lpm_mutex; /* lpm protection */
+	struct list_head head;
+};
+
+struct mhi_bus {
+	struct list_head controller_list;
+	struct mutex lock;
+};
+
+/* default MHI timeout */
+#define MHI_TIMEOUT_MS (1000)
+extern struct mhi_bus mhi_bus;
+
+/* debug fs related functions */
+int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d);
+int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d);
+int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d);
+int mhi_debugfs_trigger_reset(void *data, u64 val);
+
+void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl);
+void mhi_init_debugfs(struct mhi_controller *mhi_cntrl);
+
+/* power management apis */
+enum MHI_PM_STATE __must_check mhi_tryset_pm_state(
+					struct mhi_controller *mhi_cntrl,
+					enum MHI_PM_STATE state);
+const char *to_mhi_pm_state_str(enum MHI_PM_STATE state);
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
+		    struct mhi_chan *mhi_chan);
+enum mhi_ee mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+			       enum MHI_ST_TRANSITION state);
+void mhi_pm_st_worker(struct work_struct *work);
+void mhi_fw_load_worker(struct work_struct *work);
+void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_low_priority_worker(struct work_struct *work);
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
+void mhi_ctrl_ev_task(unsigned long data);
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
+void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason);
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+				struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+			     struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
+				 struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
+				 struct mhi_event *mhi_event, u32 event_quota);
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+		 enum MHI_CMD cmd);
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
+
+/* queue transfer buffer */
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+		void *buf, void *cb, size_t buf_len, enum MHI_FLAGS flags);
+int mhi_queue_buf(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+		  void *buf, size_t len, enum MHI_FLAGS mflags);
+int mhi_queue_skb(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+		  void *buf, size_t len, enum MHI_FLAGS mflags);
+int mhi_queue_sclist(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+		  void *buf, size_t len, enum MHI_FLAGS mflags);
+int mhi_queue_nop(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+		  void *buf, size_t len, enum MHI_FLAGS mflags);
+int mhi_queue_dma(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+		  void *buf, size_t len, enum MHI_FLAGS mflags);
+
+/* register access methods */
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
+		     void __iomem *db_addr, dma_addr_t wp);
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+			     struct db_cfg *db_mode, void __iomem *db_addr,
+			     dma_addr_t wp);
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+			      void __iomem *base, u32 offset, u32 *out);
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+				    void __iomem *base, u32 offset, u32 mask,
+				    u32 shift, u32 *out);
+void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
+		   u32 offset, u32 val);
+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
+			 u32 offset, u32 mask, u32 shift, u32 val);
+void mhi_ring_er_db(struct mhi_event *mhi_event);
+void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
+		  dma_addr_t wp);
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+		      struct mhi_chan *mhi_chan);
+int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability,
+			      u32 *offset);
+void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr);
+int mhi_init_timesync(struct mhi_controller *mhi_cntrl);
+int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl);
+void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl);
+int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl);
+void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl);
+int mhi_early_notify_device(struct device *dev, void *data);
+
+/* timesync log support */
+static inline void mhi_timesync_log(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+
+	if (mhi_tsync && mhi_cntrl->tsync_log)
+		mhi_cntrl->tsync_log(mhi_cntrl,
+				     readq_no_log(mhi_tsync->time_reg));
+}
+
+/* memory allocation methods */
+static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl,
+				       size_t size,
+				       dma_addr_t *dma_handle,
+				       gfp_t gfp)
+{
+	void *buf = dma_zalloc_coherent(mhi_cntrl->dev, size, dma_handle, gfp);
+
+	if (buf)
+		atomic_add(size, &mhi_cntrl->alloc_size);
+
+	return buf;
+}
+
+static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl,
+				     size_t size,
+				     void *vaddr,
+				     dma_addr_t dma_handle)
+{
+	atomic_sub(size, &mhi_cntrl->alloc_size);
+	dma_free_coherent(mhi_cntrl->dev, size, vaddr, dma_handle);
+}
+
+struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);
+static inline void mhi_dealloc_device(struct mhi_controller *mhi_cntrl,
+				      struct mhi_device *mhi_dev)
+{
+	kfree(mhi_dev);
+}
+
+int mhi_destroy_device(struct device *dev, void *data);
+void mhi_create_devices(struct mhi_controller *mhi_cntrl);
+int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
+			 struct image_info **image_info, size_t alloc_size);
+void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
+			 struct image_info *image_info);
+
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+			 struct mhi_buf_info *buf_info);
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+			  struct mhi_buf_info *buf_info);
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+			    struct mhi_buf_info *buf_info);
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+			     struct mhi_buf_info *buf_info);
+
+/* initialization methods */
+int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
+		       struct mhi_chan *mhi_chan);
+void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
+			  struct mhi_chan *mhi_chan);
+int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
+int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
+int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
+void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
+int mhi_dtr_init(void);
+void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+		      struct image_info *img_info);
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+			struct mhi_chan *mhi_chan);
+
+/* isr handlers */
+irqreturn_t mhi_msi_handlr(int irq_number, void *dev);
+irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev);
+irqreturn_t mhi_intvec_handlr(int irq_number, void *dev);
+void mhi_ev_task(unsigned long data);
+
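+/* debug builds panic on an assert; otherwise log the message and WARN */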
+#ifdef CONFIG_MHI_DEBUG
+
+#define MHI_ASSERT(cond, msg) do { \
+	if (cond) \
+		panic(msg); \
+} while (0)
+
+#else
+
+#define MHI_ASSERT(cond, msg) do { \
+	if (cond) { \
+		MHI_ERR(msg); \
+		WARN_ON(cond); \
+	} \
+} while (0)
+
+#endif
+
+#endif /* _MHI_INT_H */
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
new file mode 100644
index 0000000..7404b8e
--- /dev/null
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -0,0 +1,2388 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/mhi.h>
+#include <linux/types.h>
+#include "mhi_internal.h"
+
+static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+				    struct mhi_chan *mhi_chan);
+
+int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
+			      void __iomem *base,
+			      u32 offset,
+			      u32 *out)
+{
+	u32 tmp = readl_relaxed(base + offset);
+
+	/* unexpected value, query the link status */
+	if (PCI_INVALID_READ(tmp) &&
+	    mhi_cntrl->link_status(mhi_cntrl, mhi_cntrl->priv_data))
+		return -EIO;
+
+	*out = tmp;
+
+	return 0;
+}
+
+int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
+				    void __iomem *base,
+				    u32 offset,
+				    u32 mask,
+				    u32 shift,
+				    u32 *out)
+{
+	u32 tmp;
+	int ret;
+
+	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+	if (ret)
+		return ret;
+
+	*out = (tmp & mask) >> shift;
+
+	return 0;
+}
+
+int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl,
+			      u32 capability,
+			      u32 *offset)
+{
+	u32 cur_cap, next_offset;
+	int ret;
+
+	/* get the 1st supported capability offset */
+	ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MISC_OFFSET,
+				 MISC_CAP_MASK, MISC_CAP_SHIFT, offset);
+	if (ret)
+		return ret;
+	do {
+		ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset,
+					 CAP_CAPID_MASK, CAP_CAPID_SHIFT,
+					 &cur_cap);
+		if (ret)
+			return ret;
+
+		if (cur_cap == capability)
+			return 0;
+
+		ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset,
+					 CAP_NEXT_CAP_MASK, CAP_NEXT_CAP_SHIFT,
+					 &next_offset);
+		if (ret)
+			return ret;
+
+		*offset = next_offset;
+		if (*offset >= MHI_REG_SIZE)
+			return -ENXIO;
+	} while (next_offset);
+
+	return -ENXIO;
+}
+
+void mhi_write_reg(struct mhi_controller *mhi_cntrl,
+		   void __iomem *base,
+		   u32 offset,
+		   u32 val)
+{
+	writel_relaxed(val, base + offset);
+}
+
+void mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
+			 void __iomem *base,
+			 u32 offset,
+			 u32 mask,
+			 u32 shift,
+			 u32 val)
+{
+	int ret;
+	u32 tmp;
+
+	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
+	if (ret)
+		return;
+
+	tmp &= ~mask;
+	tmp |= (val << shift);
+	mhi_write_reg(mhi_cntrl, base, offset, tmp);
+}
+
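+/*
+ * 64-bit doorbell writes are split in two; write the upper dword first
+ * so the full value is in place by the time the device is expected to
+ * sample the doorbell on the lower dword write
+ */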
+void mhi_write_db(struct mhi_controller *mhi_cntrl,
+		  void __iomem *db_addr,
+		  dma_addr_t wp)
+{
+	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(wp));
+	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(wp));
+}
+
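+/*
+ * burst mode: ring the doorbell only when db_mode is set; after one
+ * write, stay quiet until db_mode is set again (see MHI_EV_CC_DB_MODE)
+ */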
+void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
+		     struct db_cfg *db_cfg,
+		     void __iomem *db_addr,
+		     dma_addr_t wp)
+{
+	if (db_cfg->db_mode) {
+		db_cfg->db_val = wp;
+		mhi_write_db(mhi_cntrl, db_addr, wp);
+		db_cfg->db_mode = 0;
+	}
+}
+
+void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
+			     struct db_cfg *db_cfg,
+			     void __iomem *db_addr,
+			     dma_addr_t wp)
+{
+	db_cfg->db_val = wp;
+	mhi_write_db(mhi_cntrl, db_addr, wp);
+}
+
+void mhi_ring_er_db(struct mhi_event *mhi_event)
+{
+	struct mhi_ring *ring = &mhi_event->ring;
+
+	mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
+				     ring->db_addr, *ring->ctxt_wp);
+}
+
+void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
+{
+	dma_addr_t db;
+	struct mhi_ring *ring = &mhi_cmd->ring;
+
+	db = ring->iommu_base + (ring->wp - ring->base);
+	*ring->ctxt_wp = db;
+	mhi_write_db(mhi_cntrl, ring->db_addr, db);
+}
+
+void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
+		      struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *ring = &mhi_chan->tre_ring;
+	dma_addr_t db;
+
+	db = ring->iommu_base + (ring->wp - ring->base);
+	*ring->ctxt_wp = db;
+	mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr,
+				    db);
+}
+
+static enum mhi_ee mhi_translate_dev_ee(struct mhi_controller *mhi_cntrl,
+					u32 dev_ee)
+{
+	enum mhi_ee i;
+
+	for (i = MHI_EE_PBL; i < MHI_EE_MAX; i++)
+		if (mhi_cntrl->ee_table[i] == dev_ee)
+			return i;
+
+	return MHI_EE_NOT_SUPPORTED;
+}
+
+enum mhi_ee mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
+{
+	u32 exec;
+	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
+
+	return (ret) ? MHI_EE_MAX : mhi_translate_dev_ee(mhi_cntrl, exec);
+}
+
+enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
+{
+	u32 state;
+	int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
+				     MHISTATUS_MHISTATE_MASK,
+				     MHISTATUS_MHISTATE_SHIFT, &state);
+	return ret ? MHI_STATE_MAX : state;
+}
+
+int mhi_queue_sclist(struct mhi_device *mhi_dev,
+		     struct mhi_chan *mhi_chan,
+		     void *buf,
+		     size_t len,
+		     enum MHI_FLAGS mflags)
+{
+	return -EINVAL;
+}
+
+int mhi_queue_nop(struct mhi_device *mhi_dev,
+		  struct mhi_chan *mhi_chan,
+		  void *buf,
+		  size_t len,
+		  enum MHI_FLAGS mflags)
+{
+	return -EINVAL;
+}
+
+static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
+				 struct mhi_ring *ring)
+{
+	ring->wp += ring->el_size;
+	if (ring->wp >= (ring->base + ring->len))
+		ring->wp = ring->base;
+	/* ensure the wp update is visible to other cores */
+	smp_wmb();
+}
+
+static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
+				 struct mhi_ring *ring)
+{
+	ring->rp += ring->el_size;
+	if (ring->rp >= (ring->base + ring->len))
+		ring->rp = ring->base;
+	/* ensure the rp update is visible to other cores */
+	smp_wmb();
+}
+
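+/*
+ * number of free elements in the ring; one slot is always kept unused
+ * so a full ring (wp just behind rp) is distinguishable from an empty
+ * one (wp == rp)
+ */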
+static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
+				      struct mhi_ring *ring)
+{
+	int nr_el;
+
+	if (ring->wp < ring->rp)
+		nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
+	else {
+		nr_el = (ring->rp - ring->base) / ring->el_size;
+		nr_el += ((ring->base + ring->len - ring->wp) /
+			  ring->el_size) - 1;
+	}
+	return nr_el;
+}
+
+void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
+{
+	return (addr - ring->iommu_base) + ring->base;
+}
+
+dma_addr_t mhi_to_physical(struct mhi_ring *ring, void *addr)
+{
+	return (addr - ring->base) + ring->iommu_base;
+}
+
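+/*
+ * event rings are recycled rather than refilled: rp and wp advance in
+ * lockstep, handing the just-processed element straight back to the
+ * device
+ */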
+static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
+					struct mhi_ring *ring)
+{
+	dma_addr_t ctxt_wp;
+
+	/* update the WP */
+	ring->wp += ring->el_size;
+	ctxt_wp = *ring->ctxt_wp + ring->el_size;
+
+	if (ring->wp >= (ring->base + ring->len)) {
+		ring->wp = ring->base;
+		ctxt_wp = ring->iommu_base;
+	}
+
+	*ring->ctxt_wp = ctxt_wp;
+
+	/* update the RP */
+	ring->rp += ring->el_size;
+	if (ring->rp >= (ring->base + ring->len))
+		ring->rp = ring->base;
+
+	/* visible to other cores */
+	smp_wmb();
+}
+
+static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
+			     struct mhi_ring *ring)
+{
+	void *tmp = ring->wp + ring->el_size;
+
+	if (tmp >= (ring->base + ring->len))
+		tmp = ring->base;
+
+	return (tmp == ring->rp);
+}
+
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+			 struct mhi_buf_info *buf_info)
+{
+	buf_info->p_addr = dma_map_single(mhi_cntrl->dev, buf_info->v_addr,
+					  buf_info->len, buf_info->dir);
+	if (dma_mapping_error(mhi_cntrl->dev, buf_info->p_addr))
+		return -ENOMEM;
+
+	return 0;
+}
+
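+/*
+ * bounce-buffer variant for controllers without full access to host
+ * DDR: stage the transfer through a coherent buffer, copying in for TX
+ * here and back out for RX on unmap
+ */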
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+			  struct mhi_buf_info *buf_info)
+{
+	void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
+				       &buf_info->p_addr, GFP_ATOMIC);
+
+	if (!buf)
+		return -ENOMEM;
+
+	if (buf_info->dir == DMA_TO_DEVICE)
+		memcpy(buf, buf_info->v_addr, buf_info->len);
+
+	buf_info->bb_addr = buf;
+
+	return 0;
+}
+
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+			    struct mhi_buf_info *buf_info)
+{
+	dma_unmap_single(mhi_cntrl->dev, buf_info->p_addr, buf_info->len,
+			 buf_info->dir);
+}
+
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+			    struct mhi_buf_info *buf_info)
+{
+	if (buf_info->dir == DMA_FROM_DEVICE)
+		memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
+
+	mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
+			  buf_info->p_addr);
+}
+
+int mhi_queue_skb(struct mhi_device *mhi_dev,
+		  struct mhi_chan *mhi_chan,
+		  void *buf,
+		  size_t len,
+		  enum MHI_FLAGS mflags)
+{
+	struct sk_buff *skb = buf;
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+	struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
+	struct mhi_buf_info *buf_info;
+	struct mhi_tre *mhi_tre;
+	int ret;
+
+	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+		return -ENOMEM;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+		MHI_VERB("MHI is not in activate state, pm_state:%s\n",
+			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+
+		return -EIO;
+	}
+
+	/* we're in M3 or transitioning to M3 */
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+	}
+
+	/* toggle wake to exit out of M2 */
+	mhi_cntrl->wake_toggle(mhi_cntrl);
+
+	/* generate the tre */
+	buf_info = buf_ring->wp;
+	buf_info->v_addr = skb->data;
+	buf_info->cb_buf = skb;
+	buf_info->wp = tre_ring->wp;
+	buf_info->dir = mhi_chan->dir;
+	buf_info->len = len;
+	ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+	if (ret)
+		goto map_error;
+
+	mhi_tre = tre_ring->wp;
+
+	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
+	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+
+	MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
+		 (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
+		 mhi_tre->dword[0], mhi_tre->dword[1]);
+
+	/* increment WP */
+	mhi_add_ring_element(mhi_cntrl, tre_ring);
+	mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+	if (mhi_chan->dir == DMA_TO_DEVICE)
+		atomic_inc(&mhi_cntrl->pending_pkts);
+
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+		read_lock_bh(&mhi_chan->lock);
+		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+		read_unlock_bh(&mhi_chan->lock);
+	}
+
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return 0;
+
+map_error:
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return ret;
+}
+
+int mhi_queue_dma(struct mhi_device *mhi_dev,
+		  struct mhi_chan *mhi_chan,
+		  void *buf,
+		  size_t len,
+		  enum MHI_FLAGS mflags)
+{
+	struct mhi_buf *mhi_buf = buf;
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+	struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
+	struct mhi_buf_info *buf_info;
+	struct mhi_tre *mhi_tre;
+
+	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+		return -ENOMEM;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+		MHI_VERB("MHI is not in activate state, pm_state:%s\n",
+			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+
+		return -EIO;
+	}
+
+	/* we're in M3 or transitioning to M3 */
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+	}
+
+	/* toggle wake to exit out of M2 */
+	mhi_cntrl->wake_toggle(mhi_cntrl);
+
+	/* generate the tre */
+	buf_info = buf_ring->wp;
+	MHI_ASSERT(buf_info->used, "TRE Not Freed\n");
+	buf_info->p_addr = mhi_buf->dma_addr;
+	buf_info->pre_mapped = true;
+	buf_info->cb_buf = mhi_buf;
+	buf_info->wp = tre_ring->wp;
+	buf_info->dir = mhi_chan->dir;
+	buf_info->len = len;
+
+	mhi_tre = tre_ring->wp;
+
+	if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) {
+		buf_info->used = true;
+		mhi_tre->ptr =
+			MHI_RSCTRE_DATA_PTR(buf_info->p_addr, buf_info->len);
+		mhi_tre->dword[0] =
+			MHI_RSCTRE_DATA_DWORD0(buf_ring->wp - buf_ring->base);
+		mhi_tre->dword[1] = MHI_RSCTRE_DATA_DWORD1;
+	} else {
+		mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+		mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
+		mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+	}
+
+	MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
+		 (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
+		 mhi_tre->dword[0], mhi_tre->dword[1]);
+
+	/* increment WP */
+	mhi_add_ring_element(mhi_cntrl, tre_ring);
+	mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+	if (mhi_chan->dir == DMA_TO_DEVICE)
+		atomic_inc(&mhi_cntrl->pending_pkts);
+
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+		read_lock_bh(&mhi_chan->lock);
+		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+		read_unlock_bh(&mhi_chan->lock);
+	}
+
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return 0;
+}
+
+int mhi_gen_tre(struct mhi_controller *mhi_cntrl,
+		struct mhi_chan *mhi_chan,
+		void *buf,
+		void *cb,
+		size_t buf_len,
+		enum MHI_FLAGS flags)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	struct mhi_tre *mhi_tre;
+	struct mhi_buf_info *buf_info;
+	int eot, eob, chain, bei;
+	int ret;
+
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+
+	buf_info = buf_ring->wp;
+	buf_info->v_addr = buf;
+	buf_info->cb_buf = cb;
+	buf_info->wp = tre_ring->wp;
+	buf_info->dir = mhi_chan->dir;
+	buf_info->len = buf_len;
+
+	ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+	if (ret)
+		return ret;
+
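+	/*
+	 * translate the queue flags into TRE control bits; bei (block
+	 * event interrupt) is set when interrupt moderation is in use
+	 */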
+	eob = !!(flags & MHI_EOB);
+	eot = !!(flags & MHI_EOT);
+	chain = !!(flags & MHI_CHAIN);
+	bei = !!(mhi_chan->intmod);
+
+	mhi_tre = tre_ring->wp;
+	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
+	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
+
+	MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
+		 (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
+		 mhi_tre->dword[0], mhi_tre->dword[1]);
+
+	/* increment WP */
+	mhi_add_ring_element(mhi_cntrl, tre_ring);
+	mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+	return 0;
+}
+
+int mhi_queue_buf(struct mhi_device *mhi_dev,
+		  struct mhi_chan *mhi_chan,
+		  void *buf,
+		  size_t len,
+		  enum MHI_FLAGS mflags)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_ring *tre_ring;
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * this check is only a guard; it's always possible for mhi to
+	 * enter an error state while the rest of the function executes,
+	 * but that is not fatal, so we do not need to hold pm_lock
+	 */
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+		MHI_VERB("MHI is not in active state, pm_state:%s\n",
+			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+		return -EIO;
+	}
+
+	tre_ring = &mhi_chan->tre_ring;
+	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+		return -ENOMEM;
+
+	ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
+	if (unlikely(ret))
+		return ret;
+
+	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+	/* we're in M3 or transitioning to M3 */
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+	}
+
+	/* toggle wake to exit out of M2 */
+	mhi_cntrl->wake_toggle(mhi_cntrl);
+
+	if (mhi_chan->dir == DMA_TO_DEVICE)
+		atomic_inc(&mhi_cntrl->pending_pkts);
+
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+		unsigned long flags;
+
+		read_lock_irqsave(&mhi_chan->lock, flags);
+		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+		read_unlock_irqrestore(&mhi_chan->lock, flags);
+	}
+
+	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+	return 0;
+}
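+
+/*
+ * Illustrative client usage (a sketch only, error handling condensed):
+ *
+ *	void *buf = kmalloc(len, GFP_ATOMIC);
+ *
+ *	if (buf && mhi_queue_buf(mhi_dev, mhi_dev->ul_chan, buf, len,
+ *				 MHI_EOT))
+ *		kfree(buf);
+ *
+ * The core owns the buffer until the client's xfer_cb runs with the
+ * matching result.buf_addr.
+ */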
+
+/* destroy specific device */
+int mhi_destroy_device(struct device *dev, void *data)
+{
+	struct mhi_device *mhi_dev;
+	struct mhi_controller *mhi_cntrl;
+
+	if (dev->bus != &mhi_bus_type)
+		return 0;
+
+	mhi_dev = to_mhi_device(dev);
+	mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	/* only destroy virtual devices attached to the bus */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE)
+		return 0;
+
+	MHI_LOG("destroy device for chan:%s\n", mhi_dev->chan_name);
+
+	/* notify the client and remove the device from mhi bus */
+	device_del(dev);
+	put_device(dev);
+
+	return 0;
+}
+
+int mhi_early_notify_device(struct device *dev, void *data)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	/* skip early notification */
+	if (!mhi_dev->early_notif)
+		return 0;
+
+	MHI_LOG("Early notification for dev:%s\n", mhi_dev->chan_name);
+
+	mhi_notify(mhi_dev, MHI_CB_FATAL_ERROR);
+
+	return 0;
+}
+
+void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason)
+{
+	struct mhi_driver *mhi_drv;
+
+	if (!mhi_dev->dev.driver)
+		return;
+
+	mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
+
+	if (mhi_drv->status_cb)
+		mhi_drv->status_cb(mhi_dev, cb_reason);
+}
+
+static void mhi_assign_of_node(struct mhi_controller *mhi_cntrl,
+			       struct mhi_device *mhi_dev)
+{
+	struct device_node *controller, *node;
+	const char *dt_name;
+	int ret;
+
+	controller = of_find_node_by_name(mhi_cntrl->of_node, "mhi_devices");
+	if (!controller)
+		return;
+
+	for_each_available_child_of_node(controller, node) {
+		ret = of_property_read_string(node, "mhi,chan", &dt_name);
+		if (ret)
+			continue;
+		if (!strcmp(mhi_dev->chan_name, dt_name)) {
+			mhi_dev->dev.of_node = node;
+			break;
+		}
+	}
+}
+
+static ssize_t time_show(struct device *dev,
+			 struct device_attribute *attr,
+			 char *buf)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	u64 t_host, t_device;
+	int ret;
+
+	ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_device);
+	if (ret) {
+		MHI_ERR("Failed to obtain time, ret:%d\n", ret);
+		return ret;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (ticks)\n",
+			 t_host, t_device);
+}
+static DEVICE_ATTR_RO(time);
+
+static ssize_t time_us_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	u64 t_host, t_device;
+	int ret;
+
+	ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_device);
+	if (ret) {
+		MHI_ERR("Failed to obtain time, ret:%d\n", ret);
+		return ret;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (us)\n",
+			 LOCAL_TICKS_TO_US(t_host),
+			 REMOTE_TICKS_TO_US(t_device));
+}
+static DEVICE_ATTR_RO(time_us);
+
+static struct attribute *mhi_tsync_attrs[] = {
+	&dev_attr_time.attr,
+	&dev_attr_time_us.attr,
+	NULL,
+};
+
+static const struct attribute_group mhi_tsync_group = {
+	.attrs = mhi_tsync_attrs,
+};
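+
+/*
+ * Once this group is registered on the controller's mhi_dev (see
+ * mhi_create_timesync_sysfs() below), userspace can read the paired
+ * timestamps; the exact sysfs path follows the device name set via
+ * dev_set_name(), e.g. (path layout is an assumption):
+ *
+ *	cat /sys/bus/mhi/devices/<device name>/time_us
+ */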
+
+void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl)
+{
+	if (mhi_cntrl->mhi_tsync) {
+		sysfs_remove_group(&mhi_cntrl->mhi_dev->dev.kobj,
+				   &mhi_tsync_group);
+		kfree(mhi_cntrl->mhi_tsync);
+		mhi_cntrl->mhi_tsync = NULL;
+	}
+}
+
+int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl)
+{
+	return sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj,
+				  &mhi_tsync_group);
+}
+
+static void mhi_create_time_sync_dev(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_device *mhi_dev;
+	int ret;
+
+	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+		return;
+
+	mhi_dev = mhi_alloc_device(mhi_cntrl);
+	if (!mhi_dev)
+		return;
+
+	mhi_dev->dev_type = MHI_TIMESYNC_TYPE;
+	mhi_dev->chan_name = "TIME_SYNC";
+	dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u_%s", mhi_dev->dev_id,
+		     mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
+		     mhi_dev->chan_name);
+
+	/* add if there is a matching DT node */
+	mhi_assign_of_node(mhi_cntrl, mhi_dev);
+
+	ret = device_add(&mhi_dev->dev);
+	if (ret) {
+		MHI_ERR("Failed to register dev for  chan:%s\n",
+			mhi_dev->chan_name);
+		mhi_dealloc_device(mhi_cntrl, mhi_dev);
+		return;
+	}
+
+	mhi_cntrl->tsync_dev = mhi_dev;
+}
+
+/* bind mhi channels into mhi devices */
+void mhi_create_devices(struct mhi_controller *mhi_cntrl)
+{
+	int i;
+	struct mhi_chan *mhi_chan;
+	struct mhi_device *mhi_dev;
+	int ret;
+
+	/*
+	 * we need to create the time sync device before creating other
+	 * devices, because a client may try to capture time during its
+	 * probe.
+	 */
+	mhi_create_time_sync_dev(mhi_cntrl);
+
+	mhi_chan = mhi_cntrl->mhi_chan;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
+		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
+			continue;
+		mhi_dev = mhi_alloc_device(mhi_cntrl);
+		if (!mhi_dev)
+			return;
+
+		mhi_dev->dev_type = MHI_XFER_TYPE;
+		switch (mhi_chan->dir) {
+		case DMA_TO_DEVICE:
+			mhi_dev->ul_chan = mhi_chan;
+			mhi_dev->ul_chan_id = mhi_chan->chan;
+			mhi_dev->ul_xfer = mhi_chan->queue_xfer;
+			mhi_dev->ul_event_id = mhi_chan->er_index;
+			break;
+		case DMA_NONE:
+		case DMA_BIDIRECTIONAL:
+			mhi_dev->ul_chan_id = mhi_chan->chan;
+			mhi_dev->ul_event_id = mhi_chan->er_index;
+			/* fall through */
+		case DMA_FROM_DEVICE:
+			/* we use dl_chan for offload channels */
+			mhi_dev->dl_chan = mhi_chan;
+			mhi_dev->dl_chan_id = mhi_chan->chan;
+			mhi_dev->dl_xfer = mhi_chan->queue_xfer;
+			mhi_dev->dl_event_id = mhi_chan->er_index;
+		}
+
+		mhi_chan->mhi_dev = mhi_dev;
+
+		/* check next channel if it matches */
+		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
+			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
+				i++;
+				mhi_chan++;
+				if (mhi_chan->dir == DMA_TO_DEVICE) {
+					mhi_dev->ul_chan = mhi_chan;
+					mhi_dev->ul_chan_id = mhi_chan->chan;
+					mhi_dev->ul_xfer = mhi_chan->queue_xfer;
+					mhi_dev->ul_event_id =
+						mhi_chan->er_index;
+				} else {
+					mhi_dev->dl_chan = mhi_chan;
+					mhi_dev->dl_chan_id = mhi_chan->chan;
+					mhi_dev->dl_xfer = mhi_chan->queue_xfer;
+					mhi_dev->dl_event_id =
+						mhi_chan->er_index;
+				}
+				mhi_chan->mhi_dev = mhi_dev;
+			}
+		}
+
+		mhi_dev->chan_name = mhi_chan->name;
+		dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u_%s",
+			     mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus,
+			     mhi_dev->slot, mhi_dev->chan_name);
+
+		/* add if there is a matching DT node */
+		mhi_assign_of_node(mhi_cntrl, mhi_dev);
+
+		/*
+		 * if set, this device should get an early notification
+		 * during the early notification state
+		 */
+		mhi_dev->early_notif =
+			of_property_read_bool(mhi_dev->dev.of_node,
+					      "mhi,early-notify");
+		/* init wake source */
+		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
+			device_init_wakeup(&mhi_dev->dev, true);
+
+		ret = device_add(&mhi_dev->dev);
+		if (ret) {
+			MHI_ERR("Failed to register dev for  chan:%s\n",
+				mhi_dev->chan_name);
+			mhi_dealloc_device(mhi_cntrl, mhi_dev);
+		}
+	}
+}
+
+static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
+			    struct mhi_tre *event,
+			    struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	u32 ev_code;
+	struct mhi_result result;
+	unsigned long flags = 0;
+
+	ev_code = MHI_TRE_GET_EV_CODE(event);
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+
+	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+		-EOVERFLOW : 0;
+
+	/*
+	 * if it's a DB event we need to grab the lock as a writer with
+	 * preemption disabled, because we have to update the db register
+	 * and another thread could be doing the same.
+	 */
+	if (ev_code >= MHI_EV_CC_OOB)
+		write_lock_irqsave(&mhi_chan->lock, flags);
+	else
+		read_lock_bh(&mhi_chan->lock);
+
+	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+		goto end_process_tx_event;
+
+	switch (ev_code) {
+	case MHI_EV_CC_OVERFLOW:
+	case MHI_EV_CC_EOB:
+	case MHI_EV_CC_EOT:
+	{
+		dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
+		struct mhi_tre *local_rp, *ev_tre;
+		void *dev_rp;
+		struct mhi_buf_info *buf_info;
+		u16 xfer_len;
+
+		/* Get the TRB this event points to */
+		ev_tre = mhi_to_virtual(tre_ring, ptr);
+
+		/* device rp after servicing the TREs */
+		dev_rp = ev_tre + 1;
+		if (dev_rp >= (tre_ring->base + tre_ring->len))
+			dev_rp = tre_ring->base;
+
+		result.dir = mhi_chan->dir;
+
+		/* local rp */
+		local_rp = tre_ring->rp;
+		while (local_rp != dev_rp) {
+			buf_info = buf_ring->rp;
+			/* if it's the last TRE, get len from the event */
+			if (local_rp == ev_tre)
+				xfer_len = MHI_TRE_GET_EV_LEN(event);
+			else
+				xfer_len = buf_info->len;
+
+			/* unmap if it's not premapped by client */
+			if (likely(!buf_info->pre_mapped))
+				mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+
+			result.buf_addr = buf_info->cb_buf;
+			result.bytes_xferd = xfer_len;
+			mhi_del_ring_element(mhi_cntrl, buf_ring);
+			mhi_del_ring_element(mhi_cntrl, tre_ring);
+			local_rp = tre_ring->rp;
+
+			/* notify client */
+			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+			if (mhi_chan->dir == DMA_TO_DEVICE)
+				atomic_dec(&mhi_cntrl->pending_pkts);
+
+			/*
+			 * recycle the buffer if it was pre-allocated; if
+			 * there is an error, not much we can do apart from
+			 * dropping the packet
+			 */
+			if (mhi_chan->pre_alloc) {
+				if (mhi_queue_buf(mhi_chan->mhi_dev, mhi_chan,
+						  buf_info->cb_buf,
+						  buf_info->len, MHI_EOT)) {
+					MHI_ERR(
+						"Error recycling buffer for chan:%d\n",
+						mhi_chan->chan);
+					kfree(buf_info->cb_buf);
+				}
+			}
+		}
+		break;
+	} /* CC_EOT */
+	case MHI_EV_CC_OOB:
+	case MHI_EV_CC_DB_MODE:
+	{
+		unsigned long flags;
+
+		MHI_VERB("DB_MODE/OOB Detected chan %d.\n", mhi_chan->chan);
+		mhi_chan->db_cfg.db_mode = 1;
+		read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+		if (tre_ring->wp != tre_ring->rp &&
+		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+		}
+		read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+		break;
+	}
+	case MHI_EV_CC_BAD_TRE:
+		MHI_ASSERT(1, "Received BAD TRE event for ring");
+		break;
+	default:
+		MHI_CRITICAL("Unknown TX completion.\n");
+
+		break;
+	} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
+
+end_process_tx_event:
+	if (ev_code >= MHI_EV_CC_OOB)
+		write_unlock_irqrestore(&mhi_chan->lock, flags);
+	else
+		read_unlock_bh(&mhi_chan->lock);
+
+	return 0;
+}
+
+static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
+			   struct mhi_tre *event,
+			   struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	struct mhi_buf_info *buf_info;
+	struct mhi_result result;
+	int ev_code;
+	u32 cookie; /* offset to local descriptor */
+	u16 xfer_len;
+
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+
+	ev_code = MHI_TRE_GET_EV_CODE(event);
+	cookie = MHI_TRE_GET_EV_COOKIE(event);
+	xfer_len = MHI_TRE_GET_EV_LEN(event);
+
+	/* received out of bound cookie */
+	MHI_ASSERT(cookie >= buf_ring->len, "Invalid Cookie\n");
+
+	buf_info = buf_ring->base + cookie;
+
+	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
+		-EOVERFLOW : 0;
+	result.bytes_xferd = xfer_len;
+	result.buf_addr = buf_info->cb_buf;
+	result.dir = mhi_chan->dir;
+
+	read_lock_bh(&mhi_chan->lock);
+
+	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
+		goto end_process_rsc_event;
+
+	MHI_ASSERT(!buf_info->used, "TRE already Freed\n");
+
+	/* notify the client */
+	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+	/*
+	 * Note: we arbitrarily increment RP even though the completion
+	 * packet we just processed might not be the first outstanding one.
+	 * We can do this because the device is guaranteed to cache
+	 * descriptors in the order it receives them, so even though this
+	 * completion event is for a later descriptor we can re-use all
+	 * descriptors up to it.
+	 * Example:
+	 * Transfer ring has descriptors: A, B, C, D
+	 * The last descriptor the host queued is D (WP) and the first
+	 * descriptor is A (RP).
+	 * The completion event we just serviced is for descriptor C.
+	 * Then we can safely queue descriptors to replace A, B, and C
+	 * even though the host did not receive completions for A and B.
+	 */
+	mhi_del_ring_element(mhi_cntrl, tre_ring);
+	buf_info->used = false;
+
+end_process_rsc_event:
+	read_unlock_bh(&mhi_chan->lock);
+
+	return 0;
+}
+
+static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
+				       struct mhi_tre *tre)
+{
+	dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
+	struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+	struct mhi_ring *mhi_ring = &cmd_ring->ring;
+	struct mhi_tre *cmd_pkt;
+	struct mhi_chan *mhi_chan;
+	struct mhi_timesync *mhi_tsync;
+	enum mhi_cmd_type type;
+	u32 chan;
+
+	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
+
+	/* out of order completion received */
+	MHI_ASSERT(cmd_pkt != mhi_ring->rp, "Out of order cmd completion");
+
+	type = MHI_TRE_GET_CMD_TYPE(cmd_pkt);
+
+	if (type == MHI_CMD_TYPE_TSYNC) {
+		mhi_tsync = mhi_cntrl->mhi_tsync;
+		mhi_tsync->ccs = MHI_TRE_GET_EV_CODE(tre);
+		complete(&mhi_tsync->completion);
+	} else {
+		chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
+		mhi_chan = &mhi_cntrl->mhi_chan[chan];
+		write_lock_bh(&mhi_chan->lock);
+		mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
+		complete(&mhi_chan->completion);
+		write_unlock_bh(&mhi_chan->lock);
+	}
+
+	mhi_del_ring_element(mhi_cntrl, mhi_ring);
+}
+
+int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
+			     struct mhi_event *mhi_event,
+			     u32 event_quota)
+{
+	struct mhi_tre *dev_rp, *local_rp;
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	struct mhi_event_ctxt *er_ctxt =
+		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	int count = 0;
+
+	/*
+	 * this is a quick check to avoid unnecessary event processing
+	 * in case we're already in an error state, but it's still
+	 * possible to transition to an error state while processing
+	 * events
+	 */
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+		MHI_ERR("No EV access, PM_STATE:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	local_rp = ev_ring->rp;
+
+	while (dev_rp != local_rp) {
+		enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+		MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n",
+			local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
+
+		switch (type) {
+		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
+		{
+			enum mhi_dev_state new_state;
+
+			new_state = MHI_TRE_GET_EV_STATE(local_rp);
+
+			MHI_LOG("MHI state change event to state:%s\n",
+				TO_MHI_STATE_STR(new_state));
+
+			switch (new_state) {
+			case MHI_STATE_M0:
+				mhi_pm_m0_transition(mhi_cntrl);
+				break;
+			case MHI_STATE_M1:
+				mhi_pm_m1_transition(mhi_cntrl);
+				break;
+			case MHI_STATE_M3:
+				mhi_pm_m3_transition(mhi_cntrl);
+				break;
+			case MHI_STATE_SYS_ERR:
+			{
+				enum MHI_PM_STATE new_state;
+
+				MHI_ERR("MHI system error detected\n");
+				write_lock_irq(&mhi_cntrl->pm_lock);
+				new_state = mhi_tryset_pm_state(mhi_cntrl,
+							MHI_PM_SYS_ERR_DETECT);
+				write_unlock_irq(&mhi_cntrl->pm_lock);
+				if (new_state == MHI_PM_SYS_ERR_DETECT)
+					schedule_work(
+						&mhi_cntrl->syserr_worker);
+				break;
+			}
+			default:
+				MHI_ERR("Unsupported STE:%s\n",
+					TO_MHI_STATE_STR(new_state));
+			}
+
+			break;
+		}
+		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
+			mhi_process_cmd_completion(mhi_cntrl, local_rp);
+			break;
+		case MHI_PKT_TYPE_EE_EVENT:
+		{
+			enum MHI_ST_TRANSITION st = MHI_ST_TRANSITION_MAX;
+			enum mhi_ee event = MHI_TRE_GET_EV_EXECENV(local_rp);
+
+			/* convert device ee to host ee */
+			event = mhi_translate_dev_ee(mhi_cntrl, event);
+
+			MHI_LOG("MHI EE received event:%s\n",
+				TO_MHI_EXEC_STR(event));
+			switch (event) {
+			case MHI_EE_SBL:
+				st = MHI_ST_TRANSITION_SBL;
+				break;
+			case MHI_EE_WFW:
+			case MHI_EE_AMSS:
+				st = MHI_ST_TRANSITION_MISSION_MODE;
+				break;
+			case MHI_EE_RDDM:
+				mhi_cntrl->status_cb(mhi_cntrl,
+						     mhi_cntrl->priv_data,
+						     MHI_CB_EE_RDDM);
+				write_lock_irq(&mhi_cntrl->pm_lock);
+				mhi_cntrl->ee = event;
+				write_unlock_irq(&mhi_cntrl->pm_lock);
+				wake_up_all(&mhi_cntrl->state_event);
+				break;
+			default:
+				MHI_ERR("Unhandled EE event:%s\n",
+					TO_MHI_EXEC_STR(event));
+			}
+			if (st != MHI_ST_TRANSITION_MAX)
+				mhi_queue_state_transition(mhi_cntrl, st);
+
+			break;
+		}
+		default:
+			MHI_ERR("Unhandled Event: 0x%x\n", type);
+			break;
+		}
+
+		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+		local_rp = ev_ring->rp;
+		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+		count++;
+	}
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_er_db(mhi_event);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+	return count;
+}
+
+int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
+				struct mhi_event *mhi_event,
+				u32 event_quota)
+{
+	struct mhi_tre *dev_rp, *local_rp;
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	struct mhi_event_ctxt *er_ctxt =
+		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	int count = 0;
+	u32 chan;
+	struct mhi_chan *mhi_chan;
+
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+		MHI_ERR("No EV access, PM_STATE:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	local_rp = ev_ring->rp;
+
+	while (dev_rp != local_rp && event_quota > 0) {
+		enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp);
+
+		MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n",
+			local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
+
+		chan = MHI_TRE_GET_EV_CHID(local_rp);
+		mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+		if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+			parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+			event_quota--;
+		} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+			parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+			event_quota--;
+		}
+
+		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+		local_rp = ev_ring->rp;
+		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+		count++;
+	}
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_er_db(mhi_event);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+	return count;
+}
+
+int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
+				 struct mhi_event *mhi_event,
+				 u32 event_quota)
+{
+	struct mhi_tre *dev_rp, *local_rp;
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	struct mhi_event_ctxt *er_ctxt =
+		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+	int count = 0;
+	u32 sequence;
+	u64 remote_time;
+
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+		MHI_ERR("No EV access, PM_STATE:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	local_rp = ev_ring->rp;
+
+	while (dev_rp != local_rp) {
+		enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp);
+		struct tsync_node *tsync_node;
+
+		MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n",
+			local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
+
+		MHI_ASSERT(type != MHI_PKT_TYPE_TSYNC_EVENT, "!TSYNC event");
+
+		sequence = MHI_TRE_GET_EV_TSYNC_SEQ(local_rp);
+		remote_time = MHI_TRE_GET_EV_TIME(local_rp);
+
+		do {
+			spin_lock_irq(&mhi_tsync->lock);
+			tsync_node = list_first_entry_or_null(&mhi_tsync->head,
+						      struct tsync_node, node);
+			MHI_ASSERT(!tsync_node, "Unexpected Event");
+
+			if (unlikely(!tsync_node)) {
+				spin_unlock_irq(&mhi_tsync->lock);
+				break;
+			}
+
+			list_del(&tsync_node->node);
+			spin_unlock_irq(&mhi_tsync->lock);
+
+			/*
+			 * the device may not be able to process every time
+			 * sync command the host issues and may only process
+			 * the last command it received
+			 */
+			if (tsync_node->sequence == sequence)
+				tsync_node->cb_func(tsync_node->mhi_dev,
+						    sequence,
+						    tsync_node->local_time,
+						    remote_time);
+			kfree(tsync_node);
+		} while (true);
+
+		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+		local_rp = ev_ring->rp;
+		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+		count++;
+	}
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_er_db(mhi_event);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+	return count;
+}
+
+int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
+				 struct mhi_event *mhi_event,
+				 u32 event_quota)
+{
+	struct mhi_tre *dev_rp;
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	struct mhi_event_ctxt *er_ctxt =
+		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	struct mhi_link_info link_info, *cur_info = &mhi_cntrl->mhi_link_info;
+	int result, ret = 0;
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+		MHI_LOG("No EV access, PM_STATE:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		ret = -EIO;
+		goto exit_bw_process;
+	}
+
+	/*
+	 * BW change is not processed during suspend since we're suspending
+	 * the link; the host will process it during resume
+	 */
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+		ret = -EACCES;
+		goto exit_bw_process;
+	}
+
+	spin_lock_bh(&mhi_event->lock);
+	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+	if (ev_ring->rp == dev_rp) {
+		spin_unlock_bh(&mhi_event->lock);
+		goto exit_bw_process;
+	}
+
+	/* if rp points to base, we need to wrap it around */
+	if (dev_rp == ev_ring->base)
+		dev_rp = ev_ring->base + ev_ring->len;
+	dev_rp--;
+
+	MHI_ASSERT(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_BW_REQ_EVENT,
+		   "!BW SCALE REQ event");
+
+	link_info.target_link_speed = MHI_TRE_GET_EV_LINKSPEED(dev_rp);
+	link_info.target_link_width = MHI_TRE_GET_EV_LINKWIDTH(dev_rp);
+	link_info.sequence_num = MHI_TRE_GET_EV_BW_REQ_SEQ(dev_rp);
+
+	MHI_VERB("Received BW_REQ with seq:%d link speed:0x%x width:0x%x\n",
+		 link_info.sequence_num,
+		 link_info.target_link_speed,
+		 link_info.target_link_width);
+
+	/* fast forward to the currently processed element and recycle the er */
+	ev_ring->rp = dev_rp;
+	ev_ring->wp = dev_rp - 1;
+	if (ev_ring->wp < ev_ring->base)
+		ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size;
+	mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_er_db(mhi_event);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	spin_unlock_bh(&mhi_event->lock);
+
+	ret = mhi_cntrl->bw_scale(mhi_cntrl, &link_info);
+	if (!ret)
+		*cur_info = link_info;
+
+	result = ret ? MHI_BW_SCALE_NACK : 0;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0,
+			      MHI_BW_SCALE_RESULT(result,
+						  link_info.sequence_num));
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+exit_bw_process:
+	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return ret;
+}
+
+void mhi_ev_task(unsigned long data)
+{
+	struct mhi_event *mhi_event = (struct mhi_event *)data;
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+
+	MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index);
+
+	/* process all pending events */
+	spin_lock_bh(&mhi_event->lock);
+	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+	spin_unlock_bh(&mhi_event->lock);
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+	struct mhi_event *mhi_event = (struct mhi_event *)data;
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+	enum mhi_dev_state state;
+	enum MHI_PM_STATE pm_state = 0;
+	int ret;
+
+	MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index);
+
+	/*
+	 * we can check pm_state w/o a lock here because there is no way
+	 * pm_state can change from reg access valid to no access while
+	 * this thread is executing.
+	 */
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		/*
+		 * we may have a pending event but are not allowed to
+		 * process it since we're probably in a suspended state;
+		 * trigger a resume.
+		 */
+		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+		return;
+	}
+
+	/* process ctrl events */
+	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+
+	/*
+	 * we received an MSI but have no events to process; the device
+	 * may have gone to SYS_ERR state, so check the state
+	 */
+	if (!ret) {
+		write_lock_irq(&mhi_cntrl->pm_lock);
+		state = mhi_get_mhi_state(mhi_cntrl);
+		if (state == MHI_STATE_SYS_ERR) {
+			MHI_ERR("MHI system error detected\n");
+			pm_state = mhi_tryset_pm_state(mhi_cntrl,
+						       MHI_PM_SYS_ERR_DETECT);
+		}
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		if (pm_state == MHI_PM_SYS_ERR_DETECT)
+			schedule_work(&mhi_cntrl->syserr_worker);
+	}
+}
+
+irqreturn_t mhi_msi_handlr(int irq_number, void *dev)
+{
+	struct mhi_event *mhi_event = dev;
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+	struct mhi_event_ctxt *er_ctxt =
+		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+	/* confirm ER has pending events to process before scheduling work */
+	if (ev_ring->rp == dev_rp)
+		return IRQ_HANDLED;
+
+	/* client-managed event ring, notify the client of pending data */
+	if (mhi_event->cl_manage) {
+		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
+		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
+
+		if (mhi_dev)
+			mhi_dev->status_cb(mhi_dev, MHI_CB_PENDING_DATA);
+
+		return IRQ_HANDLED;
+	}
+
+	if (IS_MHI_ER_PRIORITY_HIGH(mhi_event))
+		tasklet_hi_schedule(&mhi_event->task);
+	else
+		tasklet_schedule(&mhi_event->task);
+
+	return IRQ_HANDLED;
+}
+
+/* this is the threaded irq handler */
+irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
+{
+	struct mhi_controller *mhi_cntrl = dev;
+	enum mhi_dev_state state = MHI_STATE_MAX;
+	enum MHI_PM_STATE pm_state = 0;
+	enum mhi_ee ee = 0;
+
+	MHI_VERB("Enter\n");
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		state = mhi_get_mhi_state(mhi_cntrl);
+		ee = mhi_cntrl->ee;
+		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+		MHI_LOG("device ee:%s dev_state:%s\n",
+			TO_MHI_EXEC_STR(mhi_cntrl->ee),
+			TO_MHI_STATE_STR(state));
+	}
+
+	if (state == MHI_STATE_SYS_ERR) {
+		MHI_ERR("MHI system error detected\n");
+		pm_state = mhi_tryset_pm_state(mhi_cntrl,
+					       MHI_PM_SYS_ERR_DETECT);
+	}
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/* if the device is in RDDM don't bother processing the sys error */
+	if (mhi_cntrl->ee == MHI_EE_RDDM) {
+		if (mhi_cntrl->ee != ee) {
+			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+					     MHI_CB_EE_RDDM);
+			wake_up_all(&mhi_cntrl->state_event);
+		}
+		goto exit_intvec;
+	}
+
+	if (pm_state == MHI_PM_SYS_ERR_DETECT) {
+		wake_up_all(&mhi_cntrl->state_event);
+
+		/* for fatal errors, we let controller decide next step */
+		if (MHI_IN_PBL(ee))
+			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+					     MHI_CB_FATAL_ERROR);
+		else
+			schedule_work(&mhi_cntrl->syserr_worker);
+	}
+
+exit_intvec:
+	MHI_VERB("Exit\n");
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_intvec_handlr(int irq_number, void *dev)
+{
+	struct mhi_controller *mhi_cntrl = dev;
+
+	/* wake up any events waiting for state change */
+	MHI_VERB("Enter\n");
+	wake_up_all(&mhi_cntrl->state_event);
+	MHI_VERB("Exit\n");
+
+	schedule_work(&mhi_cntrl->low_priority_worker);
+
+	return IRQ_WAKE_THREAD;
+}
+
+int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
+		 struct mhi_chan *mhi_chan,
+		 enum MHI_CMD cmd)
+{
+	struct mhi_tre *cmd_tre = NULL;
+	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+	struct mhi_ring *ring = &mhi_cmd->ring;
+	int chan = 0;
+
+	MHI_VERB("Entered, MHI pm_state:%s dev_state:%s ee:%s\n",
+		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		 TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	if (mhi_chan)
+		chan = mhi_chan->chan;
+
+	spin_lock_bh(&mhi_cmd->lock);
+	if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
+		spin_unlock_bh(&mhi_cmd->lock);
+		return -ENOMEM;
+	}
+
+	/* prepare the cmd tre */
+	cmd_tre = ring->wp;
+	switch (cmd) {
+	case MHI_CMD_RESET_CHAN:
+		cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
+		cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
+		cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
+		break;
+	case MHI_CMD_START_CHAN:
+		cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
+		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
+		cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
+		break;
+	case MHI_CMD_TIMSYNC_CFG:
+		cmd_tre->ptr = MHI_TRE_CMD_TSYNC_CFG_PTR;
+		cmd_tre->dword[0] = MHI_TRE_CMD_TSYNC_CFG_DWORD0;
+		cmd_tre->dword[1] = MHI_TRE_CMD_TSYNC_CFG_DWORD1
+			(mhi_cntrl->mhi_tsync->er_index);
+		break;
+	}
+
+	MHI_VERB("WP:0x%llx TRE: 0x%llx 0x%08x 0x%08x\n",
+		 (u64)mhi_to_physical(ring, cmd_tre), cmd_tre->ptr,
+		 cmd_tre->dword[0], cmd_tre->dword[1]);
+
+	/* queue to hardware */
+	mhi_add_ring_element(mhi_cntrl, ring);
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	spin_unlock_bh(&mhi_cmd->lock);
+
+	return 0;
+}
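+
+/*
+ * Callers pair mhi_send_cmd() with a wait on the channel completion, as
+ * mhi_prepare_channel() below does:
+ *
+ *	reinit_completion(&mhi_chan->completion);
+ *	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
+ *	...
+ *	ret = wait_for_completion_timeout(&mhi_chan->completion,
+ *				msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ */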
+
+int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
+			struct mhi_chan *mhi_chan)
+{
+	int ret = 0;
+
+	MHI_LOG("Entered: preparing channel:%d\n", mhi_chan->chan);
+
+	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
+		MHI_ERR("Current EE:%s Required EE Mask:0x%x for chan:%s\n",
+			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
+			mhi_chan->name);
+		return -ENOTCONN;
+	}
+
+	mutex_lock(&mhi_chan->mutex);
+
+	/* do not allow a start if the channel is not in the disabled state */
+	if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
+		ret = -EIO;
+		MHI_LOG("channel:%d is not in disabled state, ch_state%d\n",
+			mhi_chan->chan, mhi_chan->ch_state);
+		goto error_init_chan;
+	}
+
+	/* client manages channel context for offload channels */
+	if (!mhi_chan->offload_ch) {
+		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
+		if (ret) {
+			MHI_ERR("Error with init chan\n");
+			goto error_init_chan;
+		}
+	}
+
+	reinit_completion(&mhi_chan->completion);
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI host is not in active state\n");
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		ret = -EIO;
+		goto error_pm_state;
+	}
+
+	mhi_cntrl->wake_toggle(mhi_cntrl);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+
+	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
+	if (ret) {
+		MHI_ERR("Failed to send start chan cmd\n");
+		goto error_pm_state;
+	}
+
+	ret = wait_for_completion_timeout(&mhi_chan->completion,
+				msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
+		MHI_ERR("Failed to receive cmd completion for chan:%d\n",
+			mhi_chan->chan);
+		ret = -EIO;
+		goto error_pm_state;
+	}
+
+	write_lock_irq(&mhi_chan->lock);
+	mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
+	write_unlock_irq(&mhi_chan->lock);
+
+	/* pre-allocate buffers for the xfer ring */
+	if (mhi_chan->pre_alloc) {
+		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
+						       &mhi_chan->tre_ring);
+		size_t len = mhi_cntrl->buffer_len;
+
+		while (nr_el--) {
+			void *buf;
+
+			buf = kmalloc(len, GFP_KERNEL);
+			if (!buf) {
+				ret = -ENOMEM;
+				goto error_pre_alloc;
+			}
+
+			/* prepare transfer descriptors */
+			ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf,
+						len, MHI_EOT);
+			if (ret) {
+				MHI_ERR("Chan:%d error prepare buffer\n",
+					mhi_chan->chan);
+				kfree(buf);
+				goto error_pre_alloc;
+			}
+		}
+
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
+			read_lock_irq(&mhi_chan->lock);
+			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+			read_unlock_irq(&mhi_chan->lock);
+		}
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+	}
+
+	mutex_unlock(&mhi_chan->mutex);
+
+	MHI_LOG("Chan:%d successfully moved to start state\n", mhi_chan->chan);
+
+	return 0;
+
+error_pm_state:
+	if (!mhi_chan->offload_ch)
+		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+
+error_init_chan:
+	mutex_unlock(&mhi_chan->mutex);
+
+	return ret;
+
+error_pre_alloc:
+	mutex_unlock(&mhi_chan->mutex);
+	__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+
+	return ret;
+}
+
+static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
+				  struct mhi_event *mhi_event,
+				  struct mhi_event_ctxt *er_ctxt,
+				  int chan)
+{
+	struct mhi_tre *dev_rp, *local_rp;
+	struct mhi_ring *ev_ring;
+	unsigned long flags;
+
+	MHI_LOG("Marking all events for chan:%d as stale\n", chan);
+
+	ev_ring = &mhi_event->ring;
+
+	/* mark all pending events related to this channel as stale */
+	spin_lock_irqsave(&mhi_event->lock, flags);
+	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+	local_rp = ev_ring->rp;
+	while (dev_rp != local_rp) {
+		if (MHI_TRE_GET_EV_TYPE(local_rp) ==
+		    MHI_PKT_TYPE_TX_EVENT &&
+		    chan == MHI_TRE_GET_EV_CHID(local_rp))
+			local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
+					MHI_PKT_TYPE_STALE_EVENT);
+		local_rp++;
+		if (local_rp == (ev_ring->base + ev_ring->len))
+			local_rp = ev_ring->base;
+	}
+
+	MHI_LOG("Finished marking events as stale events\n");
+	spin_unlock_irqrestore(&mhi_event->lock, flags);
+}
+
+static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
+				struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	struct mhi_result result;
+
+	/* reset any pending buffers */
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+	result.transaction_status = -ENOTCONN;
+	result.bytes_xferd = 0;
+	while (tre_ring->rp != tre_ring->wp) {
+		struct mhi_buf_info *buf_info = buf_ring->rp;
+
+		if (mhi_chan->dir == DMA_TO_DEVICE)
+			atomic_dec(&mhi_cntrl->pending_pkts);
+
+		if (!buf_info->pre_mapped)
+			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
+		mhi_del_ring_element(mhi_cntrl, buf_ring);
+		mhi_del_ring_element(mhi_cntrl, tre_ring);
+
+		if (mhi_chan->pre_alloc) {
+			kfree(buf_info->cb_buf);
+		} else {
+			result.buf_addr = buf_info->cb_buf;
+			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+		}
+	}
+}
+
+static void mhi_reset_rsc_chan(struct mhi_controller *mhi_cntrl,
+			       struct mhi_chan *mhi_chan)
+{
+	struct mhi_ring *buf_ring, *tre_ring;
+	struct mhi_result result;
+	struct mhi_buf_info *buf_info;
+
+	/* reset any pending buffers */
+	buf_ring = &mhi_chan->buf_ring;
+	tre_ring = &mhi_chan->tre_ring;
+	result.transaction_status = -ENOTCONN;
+	result.bytes_xferd = 0;
+
+	buf_info = buf_ring->base;
+	for (; (void *)buf_info < buf_ring->base + buf_ring->len; buf_info++) {
+		if (!buf_info->used)
+			continue;
+
+		result.buf_addr = buf_info->cb_buf;
+		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+		buf_info->used = false;
+	}
+}
+
+void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
+{
+	struct mhi_event *mhi_event;
+	struct mhi_event_ctxt *er_ctxt;
+	int chan = mhi_chan->chan;
+
+	/* nothing to reset; clients don't queue buffers for offload channels */
+	if (mhi_chan->offload_ch)
+		return;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
+
+	mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
+
+	if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA)
+		mhi_reset_rsc_chan(mhi_cntrl, mhi_chan);
+	else
+		mhi_reset_data_chan(mhi_cntrl, mhi_chan);
+
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	MHI_LOG("Reset complete.\n");
+}
+
+static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
+				    struct mhi_chan *mhi_chan)
+{
+	int ret;
+
+	MHI_LOG("Entered: unprepare channel:%d\n", mhi_chan->chan);
+
+	/* no more processing events for this channel */
+	mutex_lock(&mhi_chan->mutex);
+	write_lock_irq(&mhi_chan->lock);
+	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+		MHI_LOG("chan:%d is already disabled\n", mhi_chan->chan);
+		write_unlock_irq(&mhi_chan->lock);
+		mutex_unlock(&mhi_chan->mutex);
+		return;
+	}
+
+	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
+	write_unlock_irq(&mhi_chan->lock);
+
+	reinit_completion(&mhi_chan->completion);
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		goto error_invalid_state;
+	}
+
+	mhi_cntrl->wake_toggle(mhi_cntrl);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
+	if (ret) {
+		MHI_ERR("Failed to send reset chan cmd\n");
+		goto error_invalid_state;
+	}
+
+	/* even if it fails we will still reset */
+	ret = wait_for_completion_timeout(&mhi_chan->completion,
+				msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
+		MHI_ERR("Failed to receive cmd completion, still resetting\n");
+
+error_invalid_state:
+	if (!mhi_chan->offload_ch) {
+		mhi_reset_chan(mhi_cntrl, mhi_chan);
+		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
+	}
+	MHI_LOG("chan:%d successfully resetted\n", mhi_chan->chan);
+	mutex_unlock(&mhi_chan->mutex);
+}
+
+int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d)
+{
+	struct mhi_controller *mhi_cntrl = m->private;
+
+	seq_printf(m,
+		   "pm_state:%s dev_state:%s EE:%s M0:%u M2:%u M3:%u M3_Fast:%u wake:%d dev_wake:%u alloc_size:%u pending_pkts:%u\n",
+		   to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		   TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		   TO_MHI_EXEC_STR(mhi_cntrl->ee),
+		   mhi_cntrl->M0, mhi_cntrl->M2, mhi_cntrl->M3,
+		   mhi_cntrl->M3_FAST, mhi_cntrl->wake_set,
+		   atomic_read(&mhi_cntrl->dev_wake),
+		   atomic_read(&mhi_cntrl->alloc_size),
+		   atomic_read(&mhi_cntrl->pending_pkts));
+	return 0;
+}
+
+int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d)
+{
+	struct mhi_controller *mhi_cntrl = m->private;
+	struct mhi_event *mhi_event;
+	struct mhi_event_ctxt *er_ctxt;
+
+	int i;
+
+	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+		     mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		if (mhi_event->offload_ev) {
+			seq_printf(m, "Index:%d offload event ring\n", i);
+		} else {
+			seq_printf(m,
+				   "Index:%d modc:%d modt:%d base:0x%0llx len:0x%llx",
+				   i, er_ctxt->intmodc, er_ctxt->intmodt,
+				   er_ctxt->rbase, er_ctxt->rlen);
+			seq_printf(m,
+				   " rp:0x%llx wp:0x%llx local_rp:0x%llx db:0x%llx\n",
+				   er_ctxt->rp, er_ctxt->wp,
+				   (u64)mhi_to_physical(ring, ring->rp),
+				   (u64)mhi_event->db_cfg.db_val);
+		}
+	}
+
+	return 0;
+}
+
+int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d)
+{
+	struct mhi_controller *mhi_cntrl = m->private;
+	struct mhi_chan *mhi_chan;
+	struct mhi_chan_ctxt *chan_ctxt;
+	int i;
+
+	mhi_chan = mhi_cntrl->mhi_chan;
+	chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
+		struct mhi_ring *ring = &mhi_chan->tre_ring;
+
+		if (mhi_chan->offload_ch) {
+			seq_printf(m, "%s(%u) offload channel\n",
+				   mhi_chan->name, mhi_chan->chan);
+		} else if (mhi_chan->mhi_dev) {
+			seq_printf(m,
+				   "%s(%u) state:0x%x brstmode:0x%x pllcfg:0x%x type:0x%x erindex:%u",
+				   mhi_chan->name, mhi_chan->chan,
+				   chan_ctxt->chstate, chan_ctxt->brstmode,
+				   chan_ctxt->pollcfg, chan_ctxt->chtype,
+				   chan_ctxt->erindex);
+			seq_printf(m,
+				   " base:0x%llx len:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n",
+				   chan_ctxt->rbase, chan_ctxt->rlen,
+				   chan_ctxt->wp,
+				   (u64)mhi_to_physical(ring, ring->rp),
+				   (u64)mhi_to_physical(ring, ring->wp),
+				   (u64)mhi_chan->db_cfg.db_val);
+		}
+	}
+
+	return 0;
+}
+
+/* move channel to start state */
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+{
+	int ret, dir;
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan;
+
+	for (dir = 0; dir < 2; dir++) {
+		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+		if (ret) {
+			MHI_ERR("Error moving chan %s,%d to START state\n",
+				mhi_chan->name, mhi_chan->chan);
+			goto error_open_chan;
+		}
+	}
+
+	return 0;
+
+error_open_chan:
+	for (--dir; dir >= 0; dir--) {
+		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_prepare_for_transfer);
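+
+/*
+ * Typical client usage, as a sketch (client_probe and its surrounding
+ * driver are hypothetical):
+ *
+ *	static int client_probe(struct mhi_device *mhi_dev,
+ *				const struct mhi_device_id *id)
+ *	{
+ *		int ret;
+ *
+ *		ret = mhi_prepare_for_transfer(mhi_dev);
+ *		if (ret)
+ *			return ret;
+ *		...
+ *	}
+ *
+ * After this returns 0, the UL and DL channels are in the start state
+ * and ready for mhi_queue_buf().
+ */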
+
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan;
+	int dir;
+
+	for (dir = 0; dir < 2; dir++) {
+		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
+	}
+}
+EXPORT_SYMBOL(mhi_unprepare_from_transfer);
+
+int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
+				enum dma_data_direction dir)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+		mhi_dev->ul_chan : mhi_dev->dl_chan;
+	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+	return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL(mhi_get_no_free_descriptors);
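+
+/*
+ * Clients typically use this for flow control before queuing, e.g.
+ * (illustrative sketch):
+ *
+ *	if (!mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE))
+ *		return -EBUSY;
+ *
+ * A zero return means the transfer ring is full and the client should
+ * retry later.
+ */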
+
+static int __mhi_bdf_to_controller(struct device *dev, void *tmp)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_device *match = tmp;
+
+	/* return any non-zero value on match */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE &&
+	    mhi_dev->domain == match->domain && mhi_dev->bus == match->bus &&
+	    mhi_dev->slot == match->slot && mhi_dev->dev_id == match->dev_id)
+		return 1;
+
+	return 0;
+}
+
+struct mhi_controller *mhi_bdf_to_controller(u32 domain,
+					     u32 bus,
+					     u32 slot,
+					     u32 dev_id)
+{
+	struct mhi_device tmp, *mhi_dev;
+	struct device *dev;
+
+	tmp.domain = domain;
+	tmp.bus = bus;
+	tmp.slot = slot;
+	tmp.dev_id = dev_id;
+
+	dev = bus_find_device(&mhi_bus_type, NULL, &tmp,
+			      __mhi_bdf_to_controller);
+	if (!dev)
+		return NULL;
+
+	mhi_dev = to_mhi_device(dev);
+
+	return mhi_dev->mhi_cntrl;
+}
+EXPORT_SYMBOL(mhi_bdf_to_controller);
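+
+/*
+ * Lookup sketch (the domain/bus/slot/dev_id values are whatever the
+ * controller registered with):
+ *
+ *	struct mhi_controller *mhi_cntrl;
+ *
+ *	mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id);
+ *	if (!mhi_cntrl)
+ *		return -ENODEV;
+ */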
+
+int mhi_poll(struct mhi_device *mhi_dev,
+	     u32 budget)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
+	struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+	int ret;
+
+	spin_lock_bh(&mhi_event->lock);
+	ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
+	spin_unlock_bh(&mhi_event->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_poll);
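+
+/*
+ * mhi_poll() is intended for clients that do their own event processing,
+ * e.g. from a NAPI poll handler (a sketch only; client_poll and its
+ * netdev plumbing are hypothetical):
+ *
+ *	static int client_poll(struct napi_struct *napi, int budget)
+ *	{
+ *		int rx_work = mhi_poll(mhi_dev, budget);
+ *
+ *		if (rx_work < budget)
+ *			napi_complete(napi);
+ *		return rx_work;
+ *	}
+ */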
+
+int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
+			     u64 *t_host,
+			     u64 *t_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+	int ret;
+
+	/* not all devices support time feature */
+	if (!mhi_tsync)
+		return -EIO;
+
+	/* bring to M0 state */
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	mutex_lock(&mhi_tsync->lpm_mutex);
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+		MHI_ERR("MHI is not in active state, pm_state:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		ret = -EIO;
+		goto error_invalid_state;
+	}
+
+	/* disable link level low power modes */
+	ret = mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
+	if (ret)
+		goto error_invalid_state;
+
+	/*
+	 * time-critical code to fetch device times; the delay between
+	 * these two steps should be as deterministic as possible.
+	 */
+	preempt_disable();
+	local_irq_disable();
+
+	*t_host = mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data);
+	*t_dev = readq_relaxed_no_log(mhi_tsync->time_reg);
+
+	local_irq_enable();
+	preempt_enable();
+
+	mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data);
+
+error_invalid_state:
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mutex_unlock(&mhi_tsync->lpm_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_get_remote_time_sync);
+
+/**
+ * mhi_get_remote_time - Get external modem time relative to host time
+ * Trigger an event to capture modem time, and also capture host time so
+ * the client can do a relative drift comparison.
+ * It is recommended that only the tsync device call this method, and
+ * never from atomic context.
+ * @mhi_dev: Device associated with the channels
+ * @sequence: unique sequence id to track the event
+ * @cb_func: callback to invoke when the time event completes
+ */
+int mhi_get_remote_time(struct mhi_device *mhi_dev,
+			u32 sequence,
+			void (*cb_func)(struct mhi_device *mhi_dev,
+					u32 sequence,
+					u64 local_time,
+					u64 remote_time))
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+	struct tsync_node *tsync_node;
+	int ret;
+
+	/* not all devices support time feature */
+	if (!mhi_tsync)
+		return -EIO;
+
+	/* tsync db can only be rung in M0 state */
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	/*
+	 * technically we could use GFP_KERNEL, but we want to minimize
+	 * the number of times we get scheduled out
+	 */
+	tsync_node = kzalloc(sizeof(*tsync_node), GFP_ATOMIC);
+	if (!tsync_node) {
+		ret = -ENOMEM;
+		goto error_no_mem;
+	}
+
+	tsync_node->sequence = sequence;
+	tsync_node->cb_func = cb_func;
+	tsync_node->mhi_dev = mhi_dev;
+
+	/* disable link level low power modes */
+	mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+		MHI_ERR("MHI is not in active state, pm_state:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		ret = -EIO;
+		goto error_invalid_state;
+	}
+
+	spin_lock_irq(&mhi_tsync->lock);
+	list_add_tail(&tsync_node->node, &mhi_tsync->head);
+	spin_unlock_irq(&mhi_tsync->lock);
+
+	/*
+	 * time-critical code; the delay between these two steps should
+	 * be as deterministic as possible.
+	 */
+	preempt_disable();
+	local_irq_disable();
+
+	tsync_node->local_time =
+		mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data);
+	writel_relaxed_no_log(tsync_node->sequence, mhi_tsync->db);
+	/* write must go through immediately */
+	wmb();
+
+	local_irq_enable();
+	preempt_enable();
+
+	ret = 0;
+
+error_invalid_state:
+	if (ret)
+		kfree(tsync_node);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data);
+
+error_no_mem:
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_get_remote_time);
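+
+/*
+ * Usage sketch (tsync_cb and seq are hypothetical client-side names):
+ *
+ *	static void tsync_cb(struct mhi_device *mhi_dev, u32 sequence,
+ *			     u64 local_time, u64 remote_time)
+ *	{
+ *		... correlate local_time and remote_time ...
+ *	}
+ *
+ *	ret = mhi_get_remote_time(mhi_dev, seq++, tsync_cb);
+ */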
+
+void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl)
+{
+	enum mhi_dev_state state;
+	enum mhi_ee ee;
+	int i, ret;
+	u32 val;
+	void __iomem *mhi_base = mhi_cntrl->regs;
+	void __iomem *bhi_base = mhi_cntrl->bhi;
+	void __iomem *bhie_base = mhi_cntrl->bhie;
+	void __iomem *wake_db = mhi_cntrl->wake_db;
+	struct {
+		const char *name;
+		int offset;
+		void *base;
+	} debug_reg[] = {
+		{ "MHI_CNTRL", MHICTRL, mhi_base},
+		{ "MHI_STATUS", MHISTATUS, mhi_base},
+		{ "MHI_WAKE_DB", 0, wake_db},
+		{ "BHI_EXECENV", BHI_EXECENV, bhi_base},
+		{ "BHI_STATUS", BHI_STATUS, bhi_base},
+		{ "BHI_ERRCODE", BHI_ERRCODE, bhi_base},
+		{ "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base},
+		{ "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base},
+		{ "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base},
+		{ "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base},
+		{ "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base},
+		{ "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base},
+		{ "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base},
+		{ NULL },
+	};
+
+	MHI_LOG("host pm_state:%s dev_state:%s ee:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	state = mhi_get_mhi_state(mhi_cntrl);
+	ee = mhi_get_exec_env(mhi_cntrl);
+
+	MHI_LOG("device ee:%s dev_state:%s\n", TO_MHI_EXEC_STR(ee),
+		TO_MHI_STATE_STR(state));
+
+	for (i = 0; debug_reg[i].name; i++) {
+		ret = mhi_read_reg(mhi_cntrl, debug_reg[i].base,
+				   debug_reg[i].offset, &val);
+		MHI_LOG("reg:%s val:0x%x, ret:%d\n", debug_reg[i].name, val,
+			ret);
+	}
+}
+EXPORT_SYMBOL(mhi_debug_reg_dump);
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
new file mode 100644
index 0000000..3fe6545
--- /dev/null
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -0,0 +1,1522 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/mhi.h>
+#include "mhi_internal.h"
+
+/*
+ * Not all MHI state transitions are synchronous; linkdown, SSR, and
+ * shutdown can happen asynchronously at any time. mhi_tryset_pm_state()
+ * below moves to a new state only if the transition is allowed.
+ *
+ * Priority increases as we go down: while in any L0 state, a state from
+ * L1, L2, or L3 can be set. The notable exception to this rule is the
+ * DISABLE state, from which we can only transition to POR. Also, for
+ * example, while in an L2 state we cannot jump back to L1 or L0 states.
+ * Valid transitions:
+ * L0: DISABLE <--> POR
+ *     POR <--> POR
+ *     POR -> M0 -> M2 --> M0
+ *     POR -> FW_DL_ERR
+ *     FW_DL_ERR <--> FW_DL_ERR
+ *     M0 <--> M0
+ *     M0 -> FW_DL_ERR
+ *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L2: SHUTDOWN_PROCESS -> DISABLE
+ * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
+ *     LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ */
+static struct mhi_pm_transitions const mhi_state_transitions[] = {
+	/* L0 States */
+	{
+		MHI_PM_DISABLE,
+		MHI_PM_POR
+	},
+	{
+		MHI_PM_POR,
+		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
+		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+	},
+	{
+		MHI_PM_M0,
+		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
+		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+	},
+	{
+		MHI_PM_M2,
+		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	{
+		MHI_PM_M3_ENTER,
+		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	{
+		MHI_PM_M3,
+		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	{
+		MHI_PM_M3_EXIT,
+		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	{
+		MHI_PM_FW_DL_ERR,
+		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
+		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	/* L1 States */
+	{
+		MHI_PM_SYS_ERR_DETECT,
+		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	{
+		MHI_PM_SYS_ERR_PROCESS,
+		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+		MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	/* L2 States */
+	{
+		MHI_PM_SHUTDOWN_PROCESS,
+		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
+	},
+	/* L3 States */
+	{
+		MHI_PM_LD_ERR_FATAL_DETECT,
+		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
+	},
+};
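+
+/*
+ * Worked example against the table above: with pm_state at MHI_PM_M0, a
+ * request for MHI_PM_M3_ENTER is granted (it is in M0's allowed mask),
+ * but a request for MHI_PM_M3 directly is rejected and the unchanged
+ * current state is returned, which is why callers must check the return
+ * value of mhi_tryset_pm_state().
+ */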
+
+enum MHI_PM_STATE __must_check mhi_tryset_pm_state(
+				struct mhi_controller *mhi_cntrl,
+				enum MHI_PM_STATE state)
+{
+	unsigned long cur_state = mhi_cntrl->pm_state;
+	int index = find_last_bit(&cur_state, 32);
+
+	if (unlikely(index >= ARRAY_SIZE(mhi_state_transitions))) {
+		MHI_CRITICAL("cur_state:%s is not a valid pm_state\n",
+			     to_mhi_pm_state_str(cur_state));
+		return cur_state;
+	}
+
+	if (unlikely(mhi_state_transitions[index].from_state != cur_state)) {
+		MHI_ERR("index:%u cur_state:%s != actual_state: %s\n",
+			index, to_mhi_pm_state_str(cur_state),
+			to_mhi_pm_state_str
+			(mhi_state_transitions[index].from_state));
+		return cur_state;
+	}
+
+	if (unlikely(!(mhi_state_transitions[index].to_states & state))) {
+		MHI_LOG(
+			"Not allowing pm state transition from:%s to:%s state\n",
+			to_mhi_pm_state_str(cur_state),
+			to_mhi_pm_state_str(state));
+		return cur_state;
+	}
+
+	MHI_VERB("Transition to pm state from:%s to:%s\n",
+		 to_mhi_pm_state_str(cur_state), to_mhi_pm_state_str(state));
+
+	if (MHI_REG_ACCESS_VALID(cur_state) && MHI_REG_ACCESS_VALID(state))
+		mhi_timesync_log(mhi_cntrl);
+
+	mhi_cntrl->pm_state = state;
+	return mhi_cntrl->pm_state;
+}
+
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
+		       enum mhi_dev_state state)
+{
+	if (state == MHI_STATE_RESET) {
+		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
+	} else {
+		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+			MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT, state);
+	}
+}
+
+/* nop for backward compatibility; allowed to ring db registers in M2 state */
+static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
+{
+	mhi_cntrl->wake_get(mhi_cntrl, false);
+	mhi_cntrl->wake_put(mhi_cntrl, true);
+}
+
+/* set device wake */
+void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
+{
+	unsigned long flags;
+
+	/* if force is set, assert the wake db regardless of the count */
+	if (unlikely(force)) {
+		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+		atomic_inc(&mhi_cntrl->dev_wake);
+		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
+		    !mhi_cntrl->wake_set) {
+			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+			mhi_cntrl->wake_set = true;
+		}
+		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+	} else {
+		/* if resources were already requested, just increment and exit */
+		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
+			return;
+
+		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
+		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
+		    !mhi_cntrl->wake_set) {
+			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
+			mhi_cntrl->wake_set = true;
+		}
+		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+	}
+}
+
+/* clear device wake */
+void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, bool override)
+{
+	unsigned long flags;
+
+	MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake) == 0, "dev_wake == 0");
+
+	/* if the count does not drop to 0, decrement and exit */
+	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
+		return;
+
+	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
+	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
+	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
+	    mhi_cntrl->wake_set) {
+		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
+		mhi_cntrl->wake_set = false;
+	}
+	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
+}
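+
+/*
+ * Wake votes are reference counted: every wake_get must be balanced by a
+ * wake_put. A sketch of a balanced vote around device access, matching
+ * how this file uses the hooks:
+ *
+ *	read_lock_bh(&mhi_cntrl->pm_lock);
+ *	mhi_cntrl->wake_get(mhi_cntrl, false);
+ *	... access the device ...
+ *	mhi_cntrl->wake_put(mhi_cntrl, false);
+ *	read_unlock_bh(&mhi_cntrl->pm_lock);
+ */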
+
+int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
+{
+	void __iomem *base = mhi_cntrl->regs;
+	u32 reset = 1, ready = 0;
+	struct mhi_event *mhi_event;
+	enum MHI_PM_STATE cur_state;
+	int ret, i;
+
+	MHI_LOG("Waiting to enter READY state\n");
+
+	/* wait for RESET to be cleared and READY bit to be set */
+	wait_event_timeout(mhi_cntrl->state_event,
+			   MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
+			   mhi_read_reg_field(mhi_cntrl, base, MHICTRL,
+					      MHICTRL_RESET_MASK,
+					      MHICTRL_RESET_SHIFT, &reset) ||
+			   mhi_read_reg_field(mhi_cntrl, base, MHISTATUS,
+					      MHISTATUS_READY_MASK,
+					      MHISTATUS_READY_SHIFT, &ready) ||
+			   (!reset && ready),
+			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	/* device entered an error state */
+	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state))
+		return -EIO;
+
+	/* device did not transition to ready state */
+	if (reset || !ready)
+		return -ETIMEDOUT;
+
+	MHI_LOG("Device in READY State\n");
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
+	mhi_cntrl->dev_state = MHI_STATE_READY;
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	if (cur_state != MHI_PM_POR) {
+		MHI_ERR("Error moving to state %s from %s\n",
+			to_mhi_pm_state_str(MHI_PM_POR),
+			to_mhi_pm_state_str(cur_state));
+		return -EIO;
+	}
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+		goto error_mmio;
+
+	ret = mhi_init_mmio(mhi_cntrl);
+	if (ret) {
+		MHI_ERR("Error programming mmio registers\n");
+		goto error_mmio;
+	}
+
+	/* add elements to all sw event rings */
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		if (mhi_event->offload_ev || mhi_event->hw_ring)
+			continue;
+
+		ring->wp = ring->base + ring->len - ring->el_size;
+		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
+		/* ring updates must be visible to all cores */
+		smp_wmb();
+
+		/* ring the db for event rings */
+		spin_lock_irq(&mhi_event->lock);
+		mhi_ring_er_db(mhi_event);
+		spin_unlock_irq(&mhi_event->lock);
+	}
+
+	/* set device into M0 state */
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return 0;
+
+error_mmio:
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return -EIO;
+}
+
+int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
+{
+	enum MHI_PM_STATE cur_state;
+	struct mhi_chan *mhi_chan;
+	int i;
+
+	MHI_LOG("Entered With State:%s PM_STATE:%s\n",
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	mhi_cntrl->dev_state = MHI_STATE_M0;
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	if (unlikely(cur_state != MHI_PM_M0)) {
+		MHI_ERR("Failed to transition to state %s from %s\n",
+			to_mhi_pm_state_str(MHI_PM_M0),
+			to_mhi_pm_state_str(cur_state));
+		return -EIO;
+	}
+	mhi_cntrl->M0++;
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->wake_get(mhi_cntrl, true);
+
+	/* ring all event rings and CMD ring only if we're in mission mode */
+	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
+		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
+		struct mhi_cmd *mhi_cmd =
+			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
+
+		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+			if (mhi_event->offload_ev)
+				continue;
+
+			spin_lock_irq(&mhi_event->lock);
+			mhi_ring_er_db(mhi_event);
+			spin_unlock_irq(&mhi_event->lock);
+		}
+
+		/* only ring primary cmd ring */
+		spin_lock_irq(&mhi_cmd->lock);
+		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
+			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
+		spin_unlock_irq(&mhi_cmd->lock);
+	}
+
+	/* ring channel db registers */
+	mhi_chan = mhi_cntrl->mhi_chan;
+	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
+		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+		write_lock_irq(&mhi_chan->lock);
+		if (mhi_chan->db_cfg.reset_req)
+			mhi_chan->db_cfg.db_mode = true;
+
+		/* only ring DB if ring is not empty */
+		if (tre_ring->base && tre_ring->wp  != tre_ring->rp)
+			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+		write_unlock_irq(&mhi_chan->lock);
+	}
+
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	wake_up_all(&mhi_cntrl->state_event);
+	MHI_VERB("Exited\n");
+
+	return 0;
+}
+
+void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
+{
+	enum MHI_PM_STATE state;
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	/* if this fails, it means we are transitioning to M3 */
+	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
+	if (state == MHI_PM_M2) {
+		MHI_VERB("Entered M2 State\n");
+		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
+		mhi_cntrl->dev_state = MHI_STATE_M2;
+		mhi_cntrl->M2++;
+
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		wake_up_all(&mhi_cntrl->state_event);
+
+		/* transfer pending, exit M2 immediately */
+		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
+			     atomic_read(&mhi_cntrl->dev_wake))) {
+			MHI_VERB(
+				 "Exiting M2 Immediately, pending_pkts:%d dev_wake:%d\n",
+				 atomic_read(&mhi_cntrl->pending_pkts),
+				 atomic_read(&mhi_cntrl->dev_wake));
+			read_lock_bh(&mhi_cntrl->pm_lock);
+			mhi_cntrl->wake_get(mhi_cntrl, true);
+			mhi_cntrl->wake_put(mhi_cntrl, true);
+			read_unlock_bh(&mhi_cntrl->pm_lock);
+		} else {
+			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+					     MHI_CB_IDLE);
+		}
+	} else {
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+	}
+}
+
+int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
+{
+	enum MHI_PM_STATE state;
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	mhi_cntrl->dev_state = MHI_STATE_M3;
+	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	if (state != MHI_PM_M3) {
+		MHI_ERR("Failed to transition to state %s from %s\n",
+			to_mhi_pm_state_str(MHI_PM_M3),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+	wake_up_all(&mhi_cntrl->state_event);
+	mhi_cntrl->M3++;
+
+	MHI_LOG("Entered mhi_state:%s pm_state:%s\n",
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		to_mhi_pm_state_str(mhi_cntrl->pm_state));
+	return 0;
+}
+
+static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
+{
+	int i, ret;
+	struct mhi_event *mhi_event;
+
+	MHI_LOG("Processing Mission Mode Transition\n");
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+		mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+		return -EIO;
+
+	wake_up_all(&mhi_cntrl->state_event);
+
+	mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+			     MHI_CB_EE_MISSION_MODE);
+
+	/* force MHI to be in M0 state before continuing */
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+
+	/* add elements to all HW event rings */
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		ret = -EIO;
+		goto error_mission_mode;
+	}
+
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		if (mhi_event->offload_ev || !mhi_event->hw_ring)
+			continue;
+
+		ring->wp = ring->base + ring->len - ring->el_size;
+		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
+		/* all ring updates must be visible immediately */
+		smp_wmb();
+
+		spin_lock_irq(&mhi_event->lock);
+		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
+			mhi_ring_er_db(mhi_event);
+		spin_unlock_irq(&mhi_event->lock);
+
+	}
+
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	/* setup support for time sync */
+	mhi_init_timesync(mhi_cntrl);
+
+	MHI_LOG("Adding new devices\n");
+
+	/* add supported devices */
+	mhi_create_devices(mhi_cntrl);
+
+	/* setup sysfs nodes for userspace votes */
+	mhi_create_vote_sysfs(mhi_cntrl);
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+
+error_mission_mode:
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	MHI_LOG("Exit with ret:%d\n", ret);
+
+	return ret;
+}
+
+/* handles both sys_err and shutdown transitions */
+static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
+				      enum MHI_PM_STATE transition_state)
+{
+	enum MHI_PM_STATE cur_state, prev_state;
+	struct mhi_event *mhi_event;
+	struct mhi_cmd_ctxt *cmd_ctxt;
+	struct mhi_cmd *mhi_cmd;
+	struct mhi_event_ctxt *er_ctxt;
+	int ret, i;
+
+	MHI_LOG("Enter with from pm_state:%s MHI_STATE:%s to pm_state:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		to_mhi_pm_state_str(transition_state));
+
+	/* We must notify MHI control driver so it can clean up first */
+	if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
+		/*
+		 * if the controller supports rddm, we do not process
+		 * the sys error state; instead we jump directly
+		 * to rddm state
+		 */
+		if (mhi_cntrl->rddm_image) {
+			MHI_LOG(
+				"Controller Support RDDM, skipping SYS_ERR_PROCESS\n");
+			return;
+		}
+		mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+				     MHI_CB_SYS_ERROR);
+	}
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	prev_state = mhi_cntrl->pm_state;
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
+	if (cur_state == transition_state) {
+		mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
+		mhi_cntrl->dev_state = MHI_STATE_RESET;
+	}
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/* wake up any threads waiting for state transitions */
+	wake_up_all(&mhi_cntrl->state_event);
+
+	/* not handling sys_err, could be in the middle of shutdown */
+	if (cur_state != transition_state) {
+		MHI_LOG("Failed to transition to state:0x%x from:0x%x\n",
+			transition_state, cur_state);
+		mutex_unlock(&mhi_cntrl->pm_mutex);
+		return;
+	}
+
+	/* trigger MHI RESET so the device will not access host DDR */
+	if (MHI_REG_ACCESS_VALID(prev_state)) {
+		u32 in_reset = -1;
+		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
+
+		MHI_LOG("Trigger device into MHI_RESET\n");
+		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+
+		/* wait for reset to be cleared */
+		ret = wait_event_timeout(mhi_cntrl->state_event,
+					 mhi_read_reg_field(mhi_cntrl,
+						mhi_cntrl->regs, MHICTRL,
+						MHICTRL_RESET_MASK,
+						MHICTRL_RESET_SHIFT, &in_reset)
+					 || !in_reset, timeout);
+		if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) {
+			MHI_CRITICAL("Device failed to exit RESET state\n");
+			mutex_unlock(&mhi_cntrl->pm_mutex);
+			return;
+		}
+
+		/*
+		 * device clears INTVEC as part of RESET processing,
+		 * re-program it
+		 */
+		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+	}
+
+	MHI_LOG("Waiting for all pending event ring processing to complete\n");
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		if (!mhi_event->request_irq)
+			continue;
+		tasklet_kill(&mhi_event->task);
+	}
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	MHI_LOG("Reset all active channels and remove mhi devices\n");
+	device_for_each_child(mhi_cntrl->dev, NULL, mhi_destroy_device);
+
+	MHI_LOG("Finish resetting channels\n");
+
+	/* remove support for userspace votes */
+	mhi_destroy_vote_sysfs(mhi_cntrl);
+
+	MHI_LOG("Waiting for all pending threads to complete\n");
+	wake_up_all(&mhi_cntrl->state_event);
+	flush_work(&mhi_cntrl->st_worker);
+	flush_work(&mhi_cntrl->fw_worker);
+	flush_work(&mhi_cntrl->low_priority_worker);
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake), "dev_wake != 0");
+	MHI_ASSERT(atomic_read(&mhi_cntrl->pending_pkts), "pending_pkts != 0");
+
+	/* reset the ev rings and cmd rings */
+	MHI_LOG("Resetting EV CTXT and CMD CTXT\n");
+	mhi_cmd = mhi_cntrl->mhi_cmd;
+	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
+	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
+		struct mhi_ring *ring = &mhi_cmd->ring;
+
+		ring->rp = ring->base;
+		ring->wp = ring->base;
+		cmd_ctxt->rp = cmd_ctxt->rbase;
+		cmd_ctxt->wp = cmd_ctxt->rbase;
+	}
+
+	mhi_event = mhi_cntrl->mhi_event;
+	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
+		     mhi_event++) {
+		struct mhi_ring *ring = &mhi_event->ring;
+
+		/* do not touch offload er */
+		if (mhi_event->offload_ev)
+			continue;
+
+		ring->rp = ring->base;
+		ring->wp = ring->base;
+		er_ctxt->rp = er_ctxt->rbase;
+		er_ctxt->wp = er_ctxt->rbase;
+	}
+
+	/* remove support for time sync */
+	mhi_destroy_timesync(mhi_cntrl);
+
+	if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
+		mhi_ready_state_transition(mhi_cntrl);
+	} else {
+		/* move to disable state */
+		write_lock_irq(&mhi_cntrl->pm_lock);
+		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		if (unlikely(cur_state != MHI_PM_DISABLE))
+			MHI_ERR("Error moving from pm state:%s to state:%s\n",
+				to_mhi_pm_state_str(cur_state),
+				to_mhi_pm_state_str(MHI_PM_DISABLE));
+	}
+
+	MHI_LOG("Exit with pm_state:%s mhi_state:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+}
+
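+/* debugfs hook: force a SYS_ERR detection to exercise the recovery path */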
+int mhi_debugfs_trigger_reset(void *data, u64 val)
+{
+	struct mhi_controller *mhi_cntrl = data;
+	enum MHI_PM_STATE cur_state;
+	int ret;
+
+	MHI_LOG("Trigger MHI Reset\n");
+
+	/* exit lpm first */
+	mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+	mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n",
+			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_DETECT);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	if (cur_state == MHI_PM_SYS_ERR_DETECT)
+		schedule_work(&mhi_cntrl->syserr_worker);
+
+	return 0;
+}
+
+/* queue a new work item and schedule the worker */
+int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
+			       enum MHI_ST_TRANSITION state)
+{
+	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
+	unsigned long flags;
+
+	if (!item)
+		return -ENOMEM;
+
+	item->state = state;
+	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
+	list_add_tail(&item->node, &mhi_cntrl->transition_list);
+	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
+
+	schedule_work(&mhi_cntrl->st_worker);
+
+	return 0;
+}
+
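+/*
+ * Check each low priority event ring for unprocessed elements and schedule
+ * the low priority worker if any are pending.
+ */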
+static void mhi_low_priority_events_pending(struct mhi_controller *mhi_cntrl)
+{
+	struct mhi_event *mhi_event;
+
+	list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node) {
+		struct mhi_event_ctxt *er_ctxt =
+			&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+		struct mhi_ring *ev_ring = &mhi_event->ring;
+
+		spin_lock_bh(&mhi_event->lock);
+		if (ev_ring->rp != mhi_to_virtual(ev_ring, er_ctxt->rp)) {
+			schedule_work(&mhi_cntrl->low_priority_worker);
+			spin_unlock_bh(&mhi_event->lock);
+			break;
+		}
+		spin_unlock_bh(&mhi_event->lock);
+	}
+}
+
+void mhi_low_priority_worker(struct work_struct *work)
+{
+	struct mhi_controller *mhi_cntrl = container_of(work,
+							struct mhi_controller,
+							low_priority_worker);
+	struct mhi_event *mhi_event;
+
+	MHI_VERB("Enter with pm_state:%s MHI_STATE:%s ee:%s\n",
+		 to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		 TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	/* check low priority event rings and process events */
+	list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node) {
+		if (MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+			mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+	}
+}
+
+void mhi_pm_sys_err_worker(struct work_struct *work)
+{
+	struct mhi_controller *mhi_cntrl = container_of(work,
+							struct mhi_controller,
+							syserr_worker);
+
+	MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+}
+
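+/* process queued state transition work items in the order they were queued */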
+void mhi_pm_st_worker(struct work_struct *work)
+{
+	struct state_transition *itr, *tmp;
+	LIST_HEAD(head);
+	struct mhi_controller *mhi_cntrl = container_of(work,
+							struct mhi_controller,
+							st_worker);
+	spin_lock_irq(&mhi_cntrl->transition_lock);
+	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
+	spin_unlock_irq(&mhi_cntrl->transition_lock);
+
+	list_for_each_entry_safe(itr, tmp, &head, node) {
+		list_del(&itr->node);
+		MHI_LOG("Transition to state:%s\n",
+			TO_MHI_STATE_TRANS_STR(itr->state));
+
+		switch (itr->state) {
+		case MHI_ST_TRANSITION_PBL:
+			write_lock_irq(&mhi_cntrl->pm_lock);
+			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+			write_unlock_irq(&mhi_cntrl->pm_lock);
+			if (MHI_IN_PBL(mhi_cntrl->ee))
+				wake_up_all(&mhi_cntrl->state_event);
+			break;
+		case MHI_ST_TRANSITION_SBL:
+			write_lock_irq(&mhi_cntrl->pm_lock);
+			mhi_cntrl->ee = MHI_EE_SBL;
+			write_unlock_irq(&mhi_cntrl->pm_lock);
+			wake_up_all(&mhi_cntrl->state_event);
+			mhi_create_devices(mhi_cntrl);
+			break;
+		case MHI_ST_TRANSITION_MISSION_MODE:
+			mhi_pm_mission_mode_transition(mhi_cntrl);
+			break;
+		case MHI_ST_TRANSITION_READY:
+			mhi_ready_state_transition(mhi_cntrl);
+			break;
+		default:
+			break;
+		}
+		kfree(itr);
+	}
+}
+
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+	u32 val;
+	enum mhi_ee current_ee;
+	enum MHI_ST_TRANSITION next_state;
+
+	MHI_LOG("Requested to power on\n");
+
+	if (mhi_cntrl->msi_allocated < mhi_cntrl->total_ev_rings)
+		return -EINVAL;
+
+	/* fall back to the default wake routines if any are not set */
+	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
+	    !mhi_cntrl->wake_toggle) {
+		mhi_cntrl->wake_get = mhi_assert_dev_wake;
+		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
+		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
+			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
+	}
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+	mhi_cntrl->pm_state = MHI_PM_DISABLE;
+
+	if (!mhi_cntrl->pre_init) {
+		/* setup device context */
+		ret = mhi_init_dev_ctxt(mhi_cntrl);
+		if (ret) {
+			MHI_ERR("Error setting dev_context\n");
+			goto error_dev_ctxt;
+		}
+	}
+
+	ret = mhi_init_irq_setup(mhi_cntrl);
+	if (ret) {
+		MHI_ERR("Error setting up irq\n");
+		goto error_setup_irq;
+	}
+
+	/* setup bhi offset & intvec */
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
+	if (ret) {
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		MHI_ERR("Error getting bhi offset\n");
+		goto error_bhi_offset;
+	}
+
+	mhi_cntrl->bhi = mhi_cntrl->regs + val;
+
+	/* setup bhie offset if not set */
+	if (mhi_cntrl->fbc_download && !mhi_cntrl->bhie) {
+		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
+		if (ret) {
+			write_unlock_irq(&mhi_cntrl->pm_lock);
+			MHI_ERR("Error getting bhie offset\n");
+			goto error_bhi_offset;
+		}
+
+		mhi_cntrl->bhie = mhi_cntrl->regs + val;
+	}
+
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+	mhi_cntrl->pm_state = MHI_PM_POR;
+	mhi_cntrl->ee = MHI_EE_MAX;
+	current_ee = mhi_get_exec_env(mhi_cntrl);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/* confirm device is in valid exec env */
+	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
+		MHI_ERR("Not a valid ee for power on\n");
+		ret = -EIO;
+		goto error_bhi_offset;
+	}
+
+	/* transition to next state */
+	next_state = MHI_IN_PBL(current_ee) ?
+		MHI_ST_TRANSITION_PBL : MHI_ST_TRANSITION_READY;
+
+	if (next_state == MHI_ST_TRANSITION_PBL)
+		schedule_work(&mhi_cntrl->fw_worker);
+
+	mhi_queue_state_transition(mhi_cntrl, next_state);
+
+	mhi_init_debugfs(mhi_cntrl);
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	MHI_LOG("Power on setup success\n");
+
+	return 0;
+
+error_bhi_offset:
+	mhi_deinit_free_irq(mhi_cntrl);
+
+error_setup_irq:
+	if (!mhi_cntrl->pre_init)
+		mhi_deinit_dev_ctxt(mhi_cntrl);
+
+error_dev_ctxt:
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_async_power_up);
+
+/* Transition MHI into error state and notify critical clients */
+void mhi_control_error(struct mhi_controller *mhi_cntrl)
+{
+	enum MHI_PM_STATE cur_state;
+
+	MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_LD_ERR_FATAL_DETECT);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT) {
+		MHI_ERR("Failed to transition to state:%s from:%s\n",
+			to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
+			to_mhi_pm_state_str(cur_state));
+		goto exit_control_error;
+	}
+
+	/* notify waiters to bail out early since MHI has entered ERROR state */
+	wake_up_all(&mhi_cntrl->state_event);
+
+	/* start notifying all clients who request early notification */
+	device_for_each_child(mhi_cntrl->dev, NULL, mhi_early_notify_device);
+
+exit_control_error:
+	MHI_LOG("Exit with pm_state:%s MHI_STATE:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+}
+EXPORT_SYMBOL(mhi_control_error);
+
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
+{
+	enum MHI_PM_STATE cur_state;
+
+	/* if it's not graceful shutdown, force MHI to a linkdown state */
+	if (!graceful) {
+		mutex_lock(&mhi_cntrl->pm_mutex);
+		write_lock_irq(&mhi_cntrl->pm_lock);
+		cur_state = mhi_tryset_pm_state(mhi_cntrl,
+						MHI_PM_LD_ERR_FATAL_DETECT);
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		mutex_unlock(&mhi_cntrl->pm_mutex);
+		if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT)
+			MHI_ERR("Failed to move to state:%s from:%s\n",
+				to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
+				to_mhi_pm_state_str(mhi_cntrl->pm_state));
+	}
+	mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+
+	mhi_deinit_debugfs(mhi_cntrl);
+
+	mhi_deinit_free_irq(mhi_cntrl);
+
+	if (!mhi_cntrl->pre_init) {
+		/* free all allocated resources */
+		if (mhi_cntrl->fbc_image) {
+			mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+			mhi_cntrl->fbc_image = NULL;
+		}
+		mhi_deinit_dev_ctxt(mhi_cntrl);
+	}
+}
+EXPORT_SYMBOL(mhi_power_down);
+
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
+{
+	int ret = mhi_async_power_up(mhi_cntrl);
+
+	if (ret)
+		return ret;
+
+	wait_event_timeout(mhi_cntrl->state_event,
+			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
+			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -EIO;
+}
+EXPORT_SYMBOL(mhi_sync_power_up);
+
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+	enum MHI_PM_STATE new_state;
+	struct mhi_chan *itr, *tmp;
+	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+		return -EINVAL;
+
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+		return -EIO;
+
+	/* do a quick check to see if there are any pending votes keeping us busy */
+	if (atomic_read(&mhi_cntrl->dev_wake) ||
+	    atomic_read(&mhi_cntrl->pending_pkts) ||
+	    atomic_read(&mhi_dev->bus_vote)) {
+		MHI_VERB("Busy, aborting M3\n");
+		return -EBUSY;
+	}
+
+	/* exit MHI out of M2 state */
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->wake_get(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
+				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR(
+			"Did not enter M0||M1 state, cur_state:%s pm_state:%s\n",
+			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		ret = -EIO;
+		goto error_m0_entry;
+	}
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+
+	/*
+	 * Check the votes once more to see if we should abort
+	 * suspend. We're asserting wake, so the count will be at least 1
+	 */
+	if (atomic_read(&mhi_cntrl->dev_wake) > 1 ||
+	    atomic_read(&mhi_cntrl->pending_pkts) ||
+	    atomic_read(&mhi_dev->bus_vote)) {
+		MHI_VERB("Busy, aborting M3\n");
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		ret = -EBUSY;
+		goto error_m0_entry;
+	}
+
+	/* anytime after this, we will resume through the runtime pm framework */
+	MHI_LOG("Allowing M3 transition\n");
+	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+	if (new_state != MHI_PM_M3_ENTER) {
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n",
+			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+		ret = -EIO;
+		goto error_m0_entry;
+	}
+
+	/* set dev to M3 and wait for completion */
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+	MHI_LOG("Wait for M3 completion\n");
+
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("Did not enter M3 state, cur_state:%s pm_state:%s\n",
+			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+
+	/* notify clients that we are entering lpm */
+	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+		mutex_lock(&itr->mutex);
+		if (itr->mhi_dev)
+			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+		mutex_unlock(&itr->mutex);
+	}
+
+	return 0;
+
+error_m0_entry:
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_pm_suspend);
+
+/**
+ * mhi_pm_fast_suspend - Faster suspend path where we transition the host to
+ * an inactive state without suspending the device. Useful for cases where we
+ * want the apps processor to go into power collapse while keeping the
+ * physical link active.
+ */
+int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
+{
+	int ret;
+	enum MHI_PM_STATE new_state;
+	struct mhi_chan *itr, *tmp;
+
+	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+		return -EINVAL;
+
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+		return -EIO;
+
+	/* do a quick check to see if there are any pending votes keeping us busy */
+	if (atomic_read(&mhi_cntrl->pending_pkts)) {
+		MHI_VERB("Busy, aborting M3\n");
+		return -EBUSY;
+	}
+
+	/* disable ctrl event processing */
+	tasklet_disable(&mhi_cntrl->mhi_event->task);
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+
+	/*
+	 * Check the votes once more to see if we should abort
+	 * suspend.
+	 */
+	if (atomic_read(&mhi_cntrl->pending_pkts)) {
+		MHI_VERB("Busy, aborting M3\n");
+		ret = -EBUSY;
+		goto error_suspend;
+	}
+
+	/* anytime after this, we will resume through the runtime pm framework */
+	MHI_LOG("Allowing Fast M3 transition\n");
+
+	/* save the current states */
+	mhi_cntrl->saved_pm_state = mhi_cntrl->pm_state;
+	mhi_cntrl->saved_dev_state = mhi_cntrl->dev_state;
+
+	/* If we're in M2, we need to switch back to M0 first */
+	if (mhi_cntrl->pm_state == MHI_PM_M2) {
+		new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
+		if (new_state != MHI_PM_M0) {
+			MHI_ERR("Error set pm_state to:%s from pm_state:%s\n",
+				to_mhi_pm_state_str(MHI_PM_M0),
+				to_mhi_pm_state_str(mhi_cntrl->pm_state));
+			ret = -EIO;
+			goto error_suspend;
+		}
+	}
+
+	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+	if (new_state != MHI_PM_M3_ENTER) {
+		MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n",
+			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		ret = -EIO;
+		goto error_suspend;
+	}
+
+	/* set dev to M3_FAST and host to M3 */
+	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
+	if (new_state != MHI_PM_M3) {
+		MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n",
+			to_mhi_pm_state_str(MHI_PM_M3),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		ret = -EIO;
+		goto error_suspend;
+	}
+
+	mhi_cntrl->dev_state = MHI_STATE_M3_FAST;
+	mhi_cntrl->M3_FAST++;
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/* now safe to check ctrl event ring */
+	tasklet_enable(&mhi_cntrl->mhi_event->task);
+	mhi_msi_handlr(0, mhi_cntrl->mhi_event);
+
+	if (!notify_client)
+		return 0;
+
+	/* notify clients that we are entering lpm */
+	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+		mutex_lock(&itr->mutex);
+		if (itr->mhi_dev)
+			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+		mutex_unlock(&itr->mutex);
+	}
+
+	return 0;
+
+error_suspend:
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/* check ctrl event ring for pending work */
+	tasklet_enable(&mhi_cntrl->mhi_event->task);
+	mhi_msi_handlr(0, mhi_cntrl->mhi_event);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_pm_fast_suspend);
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+	enum MHI_PM_STATE cur_state;
+	int ret;
+	struct mhi_chan *itr, *tmp;
+
+	MHI_LOG("Entered with pm_state:%s dev_state:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+		return 0;
+
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+		return -EIO;
+
+	MHI_ASSERT(mhi_cntrl->pm_state != MHI_PM_M3, "mhi_pm_state != M3");
+
+	/* notify clients that we are exiting lpm */
+	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+		mutex_lock(&itr->mutex);
+		if (itr->mhi_dev)
+			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+		mutex_unlock(&itr->mutex);
+	}
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
+	if (cur_state != MHI_PM_M3_EXIT) {
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n",
+			to_mhi_pm_state_str(MHI_PM_M3_EXIT),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		return -EIO;
+	}
+
+	/* set dev to M0 and wait for completion */
+	mhi_cntrl->wake_get(mhi_cntrl, true);
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n",
+			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+		/*
+		 * It's possible the device is already in an error state and we
+		 * didn't process it due to low power mode; force a check
+		 */
+		mhi_intvec_threaded_handlr(0, mhi_cntrl);
+		return -EIO;
+	}
+
+	/*
+	 * If MHI on host is in suspending/suspended state, we do not process
+	 * any low priority requests, for example, bandwidth scaling events
+	 * from the device. Check for low priority event rings and handle the
+	 * pending events upon resume.
+	 */
+	mhi_low_priority_events_pending(mhi_cntrl);
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_pm_resume);
+
+int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
+{
+	struct mhi_chan *itr, *tmp;
+	struct mhi_event *mhi_event;
+	int i;
+
+	MHI_LOG("Entered with pm_state:%s dev_state:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+		return 0;
+
+	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+		return -EIO;
+
+	MHI_ASSERT(mhi_cntrl->pm_state != MHI_PM_M3, "mhi_pm_state != M3");
+
+	/* notify any clients we're about to exit lpm */
+	if (notify_client) {
+		list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans,
+					 node) {
+			mutex_lock(&itr->mutex);
+			if (itr->mhi_dev)
+				mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+			mutex_unlock(&itr->mutex);
+		}
+	}
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	/* restore the states */
+	mhi_cntrl->pm_state = mhi_cntrl->saved_pm_state;
+	mhi_cntrl->dev_state = mhi_cntrl->saved_dev_state;
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	switch (mhi_cntrl->pm_state) {
+	case MHI_PM_M0:
+		mhi_pm_m0_transition(mhi_cntrl);
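+		/* fall through */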
+	case MHI_PM_M2:
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		/*
+		 * we're doing a double check of pm_state because by the time we
+		 * grab the pm_lock, the device may have already initiated an M0
+		 * on its own. If that's the case, we should not be toggling
+		 * device wake.
+		 */
+		if (mhi_cntrl->pm_state == MHI_PM_M2) {
+			mhi_cntrl->wake_get(mhi_cntrl, true);
+			mhi_cntrl->wake_put(mhi_cntrl, true);
+		}
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+	}
+
+	/*
+	 * In the fast suspend/resume case the device is not aware of the host
+	 * transition to suspend state, so the device could trigger an interrupt
+	 * while the host is not accepting MSIs. We have to manually check each
+	 * event ring upon resume.
+	 */
+	mhi_event = mhi_cntrl->mhi_event;
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+		if (!mhi_event->request_irq)
+			continue;
+
+		mhi_msi_handlr(0, mhi_event);
+	}
+
+	/* schedules worker if any low priority events need to be handled */
+	mhi_low_priority_events_pending(mhi_cntrl);
+
+	MHI_LOG("Exit with pm_state:%s dev_state:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_pm_fast_resume);
+
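+/* assert device wake and block until the device reaches M0 or an error state */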
+int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	mhi_cntrl->wake_get(mhi_cntrl, true);
+	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+		pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
+		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+	}
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->pm_state == MHI_PM_M0 ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n",
+			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		mhi_cntrl->wake_put(mhi_cntrl, false);
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		return -EIO;
+	}
+
+	return 0;
+}
+
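+/*
+ * MHI_VOTE_DEVICE keeps the device in M0 by asserting device wake,
+ * MHI_VOTE_BUS keeps the physical link active via the runtime pm callbacks.
+ */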
+void mhi_device_get(struct mhi_device *mhi_dev, int vote)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	if (vote & MHI_VOTE_DEVICE) {
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		mhi_cntrl->wake_get(mhi_cntrl, true);
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+		atomic_inc(&mhi_dev->dev_vote);
+	}
+
+	if (vote & MHI_VOTE_BUS) {
+		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+		atomic_inc(&mhi_dev->bus_vote);
+	}
+}
+EXPORT_SYMBOL(mhi_device_get);
+
+int mhi_device_get_sync(struct mhi_device *mhi_dev, int vote)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	int ret;
+
+	/*
+	 * regardless of the vote type, we bring the device out of lpm and
+	 * assert device wake
+	 */
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	if (vote & MHI_VOTE_DEVICE) {
+		atomic_inc(&mhi_dev->dev_vote);
+	} else {
+		/* client did not request a device vote, so de-assert dev_wake */
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		mhi_cntrl->wake_put(mhi_cntrl, false);
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+	}
+
+	if (vote & MHI_VOTE_BUS) {
+		mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+		atomic_inc(&mhi_dev->bus_vote);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mhi_device_get_sync);
+
+void mhi_device_put(struct mhi_device *mhi_dev, int vote)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	if (vote & MHI_VOTE_DEVICE) {
+		atomic_dec(&mhi_dev->dev_vote);
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+			mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+			mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+		}
+		mhi_cntrl->wake_put(mhi_cntrl, false);
+		read_unlock_bh(&mhi_cntrl->pm_lock);
+	}
+
+	if (vote & MHI_VOTE_BUS) {
+		atomic_dec(&mhi_dev->bus_vote);
+		mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+
+		/*
+		 * if the count reaches 0, all clients have released their
+		 * votes; send idle cb to attempt suspend
+		 */
+		if (!atomic_read(&mhi_dev->bus_vote))
+			mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
+					     MHI_CB_IDLE);
+	}
+}
+EXPORT_SYMBOL(mhi_device_put);
+
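+/* trigger SYS_ERR on the device to force it into ramdump (RDDM) mode */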
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+
+	MHI_LOG("Enter with pm_state:%s ee:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	/* device already in rddm */
+	if (mhi_cntrl->ee == MHI_EE_RDDM)
+		return 0;
+
+	MHI_LOG("Triggering SYS_ERR to force rddm state\n");
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+	/* wait for rddm event */
+	MHI_LOG("Waiting for device to enter RDDM state\n");
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->ee == MHI_EE_RDDM,
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	ret = ret ? 0 : -EIO;
+
+	MHI_LOG("Exiting with pm_state:%s ee:%s ret:%d\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_force_rddm_mode);
diff --git a/drivers/bus/mhi/devices/Kconfig b/drivers/bus/mhi/devices/Kconfig
new file mode 100644
index 0000000..385137f
--- /dev/null
+++ b/drivers/bus/mhi/devices/Kconfig
@@ -0,0 +1,21 @@
+menu "MHI device support"
+
+config MHI_NETDEV
+       tristate "MHI NETDEV"
+       depends on MHI_BUS
+       help
+	  MHI based net device driver for transferring
+	  IP traffic between host and modem. By enabling
+	  this driver, clients can transfer data using
+	  a standard network interface.
+
+config MHI_UCI
+       tristate "MHI UCI"
+       depends on MHI_BUS
+       help
+	  The MHI based UCI driver transfers data between
+	  host and modem using standard file operations
+	  from user space. Open, read, write, ioctl, and close
+	  operations are supported by this driver.
+
+endmenu
diff --git a/drivers/bus/mhi/devices/Makefile b/drivers/bus/mhi/devices/Makefile
new file mode 100644
index 0000000..300eed1
--- /dev/null
+++ b/drivers/bus/mhi/devices/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MHI_NETDEV) += mhi_netdev.o
+obj-$(CONFIG_MHI_UCI) += mhi_uci.o
diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c
new file mode 100644
index 0000000..7e1cffb
--- /dev/null
+++ b/drivers/bus/mhi/devices/mhi_netdev.c
@@ -0,0 +1,1137 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/msm_rmnet.h>
+#include <linux/if_arp.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/ipc_logging.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/of_device.h>
+#include <linux/rtnetlink.h>
+#include <linux/kthread.h>
+#include <linux/mhi.h>
+
+#define MHI_NETDEV_DRIVER_NAME "mhi_netdev"
+#define WATCHDOG_TIMEOUT (30 * HZ)
+#define IPC_LOG_PAGES (100)
+#define MAX_NETBUF_SIZE (128)
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define IPC_LOG_LVL (MHI_MSG_LVL_VERBOSE)
+
+#define MHI_ASSERT(cond, msg) do { \
+	if (cond) \
+		panic(msg); \
+} while (0)
+
+#define MSG_VERB(fmt, ...) do { \
+	if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_VERBOSE) \
+		pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
+	if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \
+				    MHI_MSG_LVL_VERBOSE)) \
+		ipc_log_string(mhi_netdev->ipc_log, "[D][%s] " fmt, \
+			       __func__, ##__VA_ARGS__); \
+} while (0)
+
+#else
+
+#define IPC_LOG_LVL (MHI_MSG_LVL_ERROR)
+
+#define MHI_ASSERT(cond, msg) do { \
+	if (cond) { \
+		MSG_ERR(msg); \
+		WARN_ON(cond); \
+	} \
+} while (0)
+
+#define MSG_VERB(fmt, ...)
+
+#endif
+
+#define MSG_LOG(fmt, ...) do { \
+	if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_INFO) \
+		pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
+	if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \
+				    MHI_MSG_LVL_INFO)) \
+		ipc_log_string(mhi_netdev->ipc_log, "[I][%s] " fmt, \
+			       __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define MSG_ERR(fmt, ...) do { \
+	if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_ERROR) \
+		pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
+	if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \
+				    MHI_MSG_LVL_ERROR)) \
+		ipc_log_string(mhi_netdev->ipc_log, "[E][%s] " fmt, \
+			       __func__, ##__VA_ARGS__); \
+} while (0)
+
+struct mhi_net_chain {
+	struct sk_buff *head, *tail; /* chained skb */
+};
+
+struct mhi_netdev {
+	int alias;
+	struct mhi_device *mhi_dev;
+	struct mhi_netdev *rsc_dev; /* rsc linked node */
+	struct mhi_netdev *rsc_parent;
+	bool is_rsc_dev;
+	int wake;
+
+	u32 mru;
+	u32 order;
+	const char *interface_name;
+	struct napi_struct *napi;
+	struct net_device *ndev;
+
+	struct list_head *recycle_pool;
+	int pool_size;
+	bool chain_skb;
+	struct mhi_net_chain *chain;
+
+	struct task_struct *alloc_task;
+	wait_queue_head_t alloc_event;
+	int bg_pool_limit; /* minimum pool size */
+	int bg_pool_size; /* current size of the pool */
+	struct list_head *bg_pool;
+	spinlock_t bg_lock; /* lock to access list */
+
+	struct dentry *dentry;
+	enum MHI_DEBUG_LEVEL msg_lvl;
+	enum MHI_DEBUG_LEVEL ipc_log_lvl;
+	void *ipc_log;
+
+	/* debug stats */
+	u32 abuffers, kbuffers, rbuffers;
+};
+
+struct mhi_netdev_priv {
+	struct mhi_netdev *mhi_netdev;
+};
+
+/* Try not to make this structure bigger than 128 bytes, since it takes space
+ * in the payload packet.
+ * Example: If MRU = 16K, effective MRU = 16K - sizeof(mhi_netbuf)
+ */
+struct mhi_netbuf {
+	struct mhi_buf mhi_buf; /* this must be first element */
+	bool recycle;
+	void (*unmap)(struct device *dev, dma_addr_t addr, size_t size,
+		      enum dma_data_direction dir);
+};
+
+static struct mhi_driver mhi_netdev_driver;
+static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev);
+
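+/* infer the L3 protocol from the IP version nibble; anything else is QMAP */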
+static __be16 mhi_netdev_ip_type_trans(u8 data)
+{
+	__be16 protocol = 0;
+
+	/* determine L3 protocol */
+	switch (data & 0xf0) {
+	case 0x40:
+		protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		/* default is QMAP */
+		protocol = htons(ETH_P_MAP);
+		break;
+	}
+	return protocol;
+}
+
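+/*
+ * Allocate a receive buffer: the mhi_netbuf metadata is carved out of the
+ * end of the page(s), so the usable payload length is the allocation size
+ * minus sizeof(struct mhi_netbuf). DMA mapping is skipped when dev is NULL.
+ */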
+static struct mhi_netbuf *mhi_netdev_alloc(struct device *dev,
+					   gfp_t gfp,
+					   unsigned int order)
+{
+	struct page *page;
+	struct mhi_netbuf *netbuf;
+	struct mhi_buf *mhi_buf;
+	void *vaddr;
+
+	page = __dev_alloc_pages(gfp, order);
+	if (!page)
+		return NULL;
+
+	vaddr = page_address(page);
+
+	/* we are going to use the end of the page to store cached data */
+	netbuf = vaddr + (PAGE_SIZE << order) - sizeof(*netbuf);
+	netbuf->recycle = false;
+	mhi_buf = (struct mhi_buf *)netbuf;
+	mhi_buf->page = page;
+	mhi_buf->buf = vaddr;
+	mhi_buf->len = (void *)netbuf - vaddr;
+
+	if (!dev)
+		return netbuf;
+
+	mhi_buf->dma_addr = dma_map_page(dev, page, 0, mhi_buf->len,
+					 DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, mhi_buf->dma_addr)) {
+		__free_pages(mhi_buf->page, order);
+		return NULL;
+	}
+
+	return netbuf;
+}
+
+static void mhi_netdev_unmap_page(struct device *dev,
+				  dma_addr_t dma_addr,
+				  size_t len,
+				  enum dma_data_direction dir)
+{
+	dma_unmap_page(dev, dma_addr, len, dir);
+}
+
+static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev,
+				struct mhi_device *mhi_dev,
+				int nr_tre)
+{
+	struct device *dev = mhi_dev->dev.parent;
+	const u32 order = mhi_netdev->order;
+	int i, ret;
+
+	for (i = 0; i < nr_tre; i++) {
+		struct mhi_buf *mhi_buf;
+		struct mhi_netbuf *netbuf = mhi_netdev_alloc(dev, GFP_ATOMIC,
+							     order);
+		if (!netbuf)
+			return -ENOMEM;
+
+		mhi_buf = (struct mhi_buf *)netbuf;
+		netbuf->unmap = mhi_netdev_unmap_page;
+
+		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
+					 mhi_buf->len, MHI_EOT);
+		if (unlikely(ret)) {
+			MSG_ERR("Failed to queue transfer, ret:%d\n", ret);
+			mhi_netdev_unmap_page(dev, mhi_buf->dma_addr,
+					      mhi_buf->len, DMA_FROM_DEVICE);
+			__free_pages(mhi_buf->page, order);
+			return ret;
+		}
+		mhi_netdev->abuffers++;
+	}
+
+	return 0;
+}
+
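+/* map buffers from the pre-allocated background pool and queue them to HW */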
+static int mhi_netdev_queue_bg_pool(struct mhi_netdev *mhi_netdev,
+				    struct mhi_device *mhi_dev,
+				    int nr_tre)
+{
+	struct device *dev = mhi_dev->dev.parent;
+	int i, ret;
+	LIST_HEAD(head);
+
+	spin_lock_bh(&mhi_netdev->bg_lock);
+	list_splice_init(mhi_netdev->bg_pool, &head);
+	spin_unlock_bh(&mhi_netdev->bg_lock);
+
+	for (i = 0; i < nr_tre; i++) {
+		struct mhi_buf *mhi_buf =
+			list_first_entry_or_null(&head, struct mhi_buf, node);
+		struct mhi_netbuf *netbuf = (struct mhi_netbuf *)mhi_buf;
+
+		if (!mhi_buf)
+			break;
+
+		mhi_buf->dma_addr = dma_map_page(dev, mhi_buf->page, 0,
+						 mhi_buf->len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, mhi_buf->dma_addr))
+			break;
+
+		netbuf->unmap = mhi_netdev_unmap_page;
+		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
+					 mhi_buf->len, MHI_EOT);
+		if (unlikely(ret)) {
+			MSG_ERR("Failed to queue transfer, ret:%d\n", ret);
+			mhi_netdev_unmap_page(dev, mhi_buf->dma_addr,
+					      mhi_buf->len, DMA_FROM_DEVICE);
+			break;
+		}
+		list_del(&mhi_buf->node);
+		mhi_netdev->kbuffers++;
+	}
+
+	/* add remaining buffers back to main pool */
+	spin_lock_bh(&mhi_netdev->bg_lock);
+	list_splice(&head, mhi_netdev->bg_pool);
+	mhi_netdev->bg_pool_size -= i;
+	spin_unlock_bh(&mhi_netdev->bg_lock);
+
+	/* wake up the bg thread to allocate more buffers */
+	wake_up_interruptible(&mhi_netdev->alloc_event);
+
+	return i;
+}
+
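+/*
+ * Refill free RX descriptors: try the recycle pool first, then the
+ * background pool, and finally fall back to atomic allocations.
+ */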
+static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev,
+			     struct mhi_device *mhi_dev)
+{
+	struct device *dev = mhi_dev->dev.parent;
+	struct mhi_netbuf *netbuf;
+	struct mhi_buf *mhi_buf;
+	struct list_head *pool = mhi_netdev->recycle_pool;
+	int nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+	int i, ret;
+	const int  max_peek = 4;
+
+	MSG_VERB("Enter free_desc:%d\n", nr_tre);
+
+	if (!nr_tre)
+		return;
+
+	/* try going through the recycle pool first */
+	for (i = 0; i < nr_tre; i++) {
+		/* peek for the next buffer; we're going to peek several times
+		 * and give up if buffers are not yet free
+		 */
+		int peek = 0;
+
+		netbuf = NULL;
+		list_for_each_entry(mhi_buf, pool, node) {
+			/* page ref count == 1 means idle, buffer is free to reclaim */
+			if (page_ref_count(mhi_buf->page) == 1) {
+				netbuf = (struct mhi_netbuf *)mhi_buf;
+				break;
+			}
+
+			if (peek++ >= max_peek)
+				break;
+		}
+
+		/* could not find a free buffer */
+		if (!netbuf)
+			break;
+
+		/* increment reference count so when network stack is done
+		 * with buffer, the buffer won't be freed
+		 */
+		page_ref_inc(mhi_buf->page);
+		list_del(&mhi_buf->node);
+		dma_sync_single_for_device(dev, mhi_buf->dma_addr, mhi_buf->len,
+					   DMA_FROM_DEVICE);
+		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
+					 mhi_buf->len, MHI_EOT);
+		if (unlikely(ret)) {
+			MSG_ERR("Failed to queue buffer, ret:%d\n", ret);
+			netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len,
+				      DMA_FROM_DEVICE);
+			page_ref_dec(mhi_buf->page);
+			list_add(&mhi_buf->node, pool);
+			return;
+		}
+		mhi_netdev->rbuffers++;
+	}
+
+	/* recycling did not work, buffers are still busy; use the bg pool */
+	if (i < nr_tre)
+		i += mhi_netdev_queue_bg_pool(mhi_netdev, mhi_dev, nr_tre - i);
+
+	/* still short on buffers after recycling; allocate temp pkts */
+	if (i < nr_tre)
+		mhi_netdev_tmp_alloc(mhi_netdev, mhi_dev, nr_tre - i);
+}
+
+/* allocate the recycle pool of receive buffers */
+static int mhi_netdev_alloc_pool(struct mhi_netdev *mhi_netdev)
+{
+	int i;
+	struct mhi_netbuf *netbuf;
+	struct mhi_buf *mhi_buf, *tmp;
+	const u32 order = mhi_netdev->order;
+	struct device *dev = mhi_netdev->mhi_dev->dev.parent;
+	struct list_head *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+
+	if (!pool)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(pool);
+
+	for (i = 0; i < mhi_netdev->pool_size; i++) {
+		/* allocate paged data */
+		netbuf = mhi_netdev_alloc(dev, GFP_KERNEL, order);
+		if (!netbuf)
+			goto error_alloc_page;
+
+		netbuf->unmap = dma_sync_single_for_cpu;
+		netbuf->recycle = true;
+		mhi_buf = (struct mhi_buf *)netbuf;
+		list_add(&mhi_buf->node, pool);
+	}
+
+	mhi_netdev->recycle_pool = pool;
+
+	return 0;
+
+error_alloc_page:
+	list_for_each_entry_safe(mhi_buf, tmp, pool, node) {
+		list_del(&mhi_buf->node);
+		dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len,
+			       DMA_FROM_DEVICE);
+		__free_pages(mhi_buf->page, order);
+	}
+
+	kfree(pool);
+
+	return -ENOMEM;
+}
+
+static void mhi_netdev_free_pool(struct mhi_netdev *mhi_netdev)
+{
+	struct device *dev = mhi_netdev->mhi_dev->dev.parent;
+	struct mhi_buf *mhi_buf, *tmp;
+
+	list_for_each_entry_safe(mhi_buf, tmp, mhi_netdev->recycle_pool, node) {
+		list_del(&mhi_buf->node);
+		dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len,
+			       DMA_FROM_DEVICE);
+		__free_pages(mhi_buf->page, mhi_netdev->order);
+	}
+
+	kfree(mhi_netdev->recycle_pool);
+
+	/* free the bg pool */
+	list_for_each_entry_safe(mhi_buf, tmp, mhi_netdev->bg_pool, node) {
+		list_del(&mhi_buf->node);
+		__free_pages(mhi_buf->page, mhi_netdev->order);
+		mhi_netdev->bg_pool_size--;
+	}
+}
+
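+/*
+ * Background kthread that bulk-allocates unmapped buffers to keep bg_pool
+ * at bg_pool_limit, then kicks NAPI to replenish the ring.
+ */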
+static int mhi_netdev_alloc_thread(void *data)
+{
+	struct mhi_netdev *mhi_netdev = data;
+	struct mhi_netbuf *netbuf;
+	struct mhi_buf *mhi_buf, *tmp_buf;
+	const u32 order = mhi_netdev->order;
+	LIST_HEAD(head);
+
+	while (!kthread_should_stop()) {
+		while (mhi_netdev->bg_pool_size <= mhi_netdev->bg_pool_limit) {
+			int buffers = 0, i;
+
+			/* do a bulk allocation */
+			for (i = 0; i < NAPI_POLL_WEIGHT; i++) {
+				if (kthread_should_stop())
+					goto exit_alloc;
+
+				netbuf = mhi_netdev_alloc(NULL, GFP_KERNEL,
+							  order);
+				if (!netbuf)
+					continue;
+
+				mhi_buf = (struct mhi_buf *)netbuf;
+				list_add(&mhi_buf->node, &head);
+				buffers++;
+			}
+
+			/* add the list to main pool */
+			spin_lock_bh(&mhi_netdev->bg_lock);
+			list_splice_init(&head, mhi_netdev->bg_pool);
+			mhi_netdev->bg_pool_size += buffers;
+			spin_unlock_bh(&mhi_netdev->bg_lock);
+		}
+
+		/* replenish the ring */
+		napi_schedule(mhi_netdev->napi);
+
+		/* wait for buffers to run low or thread to stop */
+		wait_event_interruptible(mhi_netdev->alloc_event,
+			kthread_should_stop() ||
+			mhi_netdev->bg_pool_size <= mhi_netdev->bg_pool_limit);
+	}
+
+exit_alloc:
+	list_for_each_entry_safe(mhi_buf, tmp_buf, &head, node) {
+		list_del(&mhi_buf->node);
+		__free_pages(mhi_buf->page, order);
+	}
+
+	return 0;
+}
+
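+/* NAPI poll handler: drain the DL event ring, then re-queue free buffers */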
+static int mhi_netdev_poll(struct napi_struct *napi, int budget)
+{
+	struct net_device *dev = napi->dev;
+	struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+	struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+	struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev;
+	struct mhi_net_chain *chain = mhi_netdev->chain;
+	int rx_work = 0;
+
+	MSG_VERB("Entered\n");
+
+	rx_work = mhi_poll(mhi_dev, budget);
+
+	/* chained skb, push it to stack */
+	if (chain && chain->head) {
+		netif_receive_skb(chain->head);
+		chain->head = NULL;
+	}
+
+	if (rx_work < 0) {
+		MSG_ERR("Error polling ret:%d\n", rx_work);
+		napi_complete(napi);
+		return 0;
+	}
+
+	/* queue new buffers */
+	mhi_netdev_queue(mhi_netdev, mhi_dev);
+
+	if (rsc_dev)
+		mhi_netdev_queue(mhi_netdev, rsc_dev->mhi_dev);
+
+	/* complete work if # of packets processed is less than the allocated budget */
+	if (rx_work < budget)
+		napi_complete(napi);
+
+	MSG_VERB("polled %d pkts\n", rx_work);
+
+	return rx_work;
+}
+
+static int mhi_netdev_open(struct net_device *dev)
+{
+	struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+	struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+
+	MSG_LOG("Opened net dev interface\n");
+
+	/* tx queue may not necessarily be stopped already
+	 * so stop the queue if tx path is not enabled
+	 */
+	if (!mhi_dev->ul_chan)
+		netif_stop_queue(dev);
+	else
+		netif_start_queue(dev);
+
+	return 0;
+}
+
+static int mhi_netdev_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+	struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+
+	if (new_mtu < 0 || mhi_dev->mtu < new_mtu)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static int mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+	struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+	int res = 0;
+
+	MSG_VERB("Entered\n");
+
+	res = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, skb, skb->len,
+				 MHI_EOT);
+	if (res) {
+		MSG_VERB("Failed to queue with reason:%d\n", res);
+		netif_stop_queue(dev);
+		res = NETDEV_TX_BUSY;
+	}
+
+	MSG_VERB("Exited\n");
+
+	return res;
+}
+
+static int mhi_netdev_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
+{
+	struct rmnet_ioctl_extended_s ext_cmd;
+	int rc = 0;
+	struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+	struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+
+	rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data,
+			    sizeof(struct rmnet_ioctl_extended_s));
+	if (rc)
+		return rc;
+
+	switch (ext_cmd.extended_ioctl) {
+	case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
+		ext_cmd.u.data = 0;
+		break;
+	case RMNET_IOCTL_GET_DRIVER_NAME:
+		strlcpy(ext_cmd.u.if_name, mhi_netdev->interface_name,
+			sizeof(ext_cmd.u.if_name));
+		break;
+	case RMNET_IOCTL_SET_SLEEP_STATE:
+		if (ext_cmd.u.data && mhi_netdev->wake) {
+			/* Request to enable LPM */
+			MSG_VERB("Enable MHI LPM");
+			mhi_netdev->wake--;
+			mhi_device_put(mhi_dev, MHI_VOTE_DEVICE);
+		} else if (!ext_cmd.u.data && !mhi_netdev->wake) {
+			/* Request to disable LPM */
+			MSG_VERB("Disable MHI LPM");
+			mhi_netdev->wake++;
+			mhi_device_get(mhi_dev, MHI_VOTE_DEVICE);
+		}
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd,
+			  sizeof(struct rmnet_ioctl_extended_s));
+	return rc;
+}
+
+static int mhi_netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	int rc = 0;
+	struct rmnet_ioctl_data_s ioctl_data;
+
+	switch (cmd) {
+	case RMNET_IOCTL_SET_LLP_IP: /* set RAWIP protocol */
+		break;
+	case RMNET_IOCTL_GET_LLP: /* get link protocol state */
+		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+		    sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+	case RMNET_IOCTL_GET_OPMODE: /* get operation mode */
+		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+		    sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+	case RMNET_IOCTL_SET_QOS_ENABLE:
+		rc = -EINVAL;
+		break;
+	case RMNET_IOCTL_SET_QOS_DISABLE:
+		rc = 0;
+		break;
+	case RMNET_IOCTL_OPEN:
+	case RMNET_IOCTL_CLOSE:
+		/* we just ignore them and return success */
+		rc = 0;
+		break;
+	case RMNET_IOCTL_EXTENDED:
+		rc = mhi_netdev_ioctl_extended(dev, ifr);
+		break;
+	default:
+		/* don't fail any IOCTL right now */
+		rc = 0;
+		break;
+	}
+
+	return rc;
+}
+
+static const struct net_device_ops mhi_netdev_ops_ip = {
+	.ndo_open = mhi_netdev_open,
+	.ndo_start_xmit = mhi_netdev_xmit,
+	.ndo_do_ioctl = mhi_netdev_ioctl,
+	.ndo_change_mtu = mhi_netdev_change_mtu,
+	.ndo_set_mac_address = 0,
+	.ndo_validate_addr = 0,
+};
+
+static void mhi_netdev_setup(struct net_device *dev)
+{
+	dev->netdev_ops = &mhi_netdev_ops_ip;
+	ether_setup(dev);
+
+	/* set this after calling ether_setup */
+	dev->header_ops = 0;  /* No header */
+	dev->type = ARPHRD_RAWIP;
+	dev->hard_header_len = 0;
+	dev->addr_len = 0;
+	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+	dev->watchdog_timeo = WATCHDOG_TIMEOUT;
+}
+
+/* enable mhi_netdev netdev, call only after grabbing mhi_netdev.mutex */
+static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev)
+{
+	int ret = 0;
+	char ifalias[IFALIASZ];
+	char ifname[IFNAMSIZ];
+	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+	struct device_node *of_node = mhi_dev->dev.of_node;
+	struct mhi_netdev_priv *mhi_netdev_priv;
+
+	mhi_netdev->alias = of_alias_get_id(of_node, "mhi_netdev");
+	if (mhi_netdev->alias < 0)
+		return -ENODEV;
+
+	ret = of_property_read_string(of_node, "mhi,interface-name",
+				      &mhi_netdev->interface_name);
+	if (ret)
+		mhi_netdev->interface_name = mhi_netdev_driver.driver.name;
+
+	snprintf(ifalias, sizeof(ifalias), "%s_%04x_%02u.%02u.%02u_%u",
+		 mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain,
+		 mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias);
+
+	snprintf(ifname, sizeof(ifname), "%s%%d", mhi_netdev->interface_name);
+
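+	/* rtnl is required by dev_set_alias() below */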
+	rtnl_lock();
+	mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv),
+					ifname, NET_NAME_PREDICTABLE,
+					mhi_netdev_setup);
+	if (!mhi_netdev->ndev) {
+		rtnl_unlock();
+		return -ENOMEM;
+	}
+
+	mhi_netdev->ndev->mtu = mhi_dev->mtu;
+	SET_NETDEV_DEV(mhi_netdev->ndev, &mhi_dev->dev);
+	dev_set_alias(mhi_netdev->ndev, ifalias, strlen(ifalias));
+	mhi_netdev_priv = netdev_priv(mhi_netdev->ndev);
+	mhi_netdev_priv->mhi_netdev = mhi_netdev;
+	rtnl_unlock();
+
+	mhi_netdev->napi = devm_kzalloc(&mhi_dev->dev,
+					sizeof(*mhi_netdev->napi), GFP_KERNEL);
+	if (!mhi_netdev->napi) {
+		ret = -ENOMEM;
+		goto napi_alloc_fail;
+	}
+
+	netif_napi_add(mhi_netdev->ndev, mhi_netdev->napi,
+		       mhi_netdev_poll, NAPI_POLL_WEIGHT);
+	ret = register_netdev(mhi_netdev->ndev);
+	if (ret) {
+		MSG_ERR("Network device registration failed\n");
+		goto net_dev_reg_fail;
+	}
+
+	napi_enable(mhi_netdev->napi);
+
+	MSG_LOG("Exited.\n");
+
+	return 0;
+
+net_dev_reg_fail:
+	netif_napi_del(mhi_netdev->napi);
+
+napi_alloc_fail:
+	free_netdev(mhi_netdev->ndev);
+	mhi_netdev->ndev = NULL;
+
+	return ret;
+}
+
+static void mhi_netdev_xfer_ul_cb(struct mhi_device *mhi_dev,
+				  struct mhi_result *mhi_result)
+{
+	struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev);
+	struct sk_buff *skb = mhi_result->buf_addr;
+	struct net_device *ndev = mhi_netdev->ndev;
+
+	ndev->stats.tx_packets++;
+	ndev->stats.tx_bytes += skb->len;
+	dev_kfree_skb(skb);
+
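+	/* a TX descriptor just freed up; let the stack resume transmits */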
+	if (netif_queue_stopped(ndev))
+		netif_wake_queue(ndev);
+}
+
+static void mhi_netdev_push_skb(struct mhi_netdev *mhi_netdev,
+				struct mhi_buf *mhi_buf,
+				struct mhi_result *mhi_result)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(0, GFP_ATOMIC);
+	if (!skb) {
+		__free_pages(mhi_buf->page, mhi_netdev->order);
+		return;
+	}
+
+	skb_add_rx_frag(skb, 0, mhi_buf->page, 0,
+			mhi_result->bytes_xferd, mhi_netdev->mru);
+	skb->dev = mhi_netdev->ndev;
+	skb->protocol = mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf);
+	netif_receive_skb(skb);
+}
+
+static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev,
+				  struct mhi_result *mhi_result)
+{
+	struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev);
+	struct mhi_netbuf *netbuf = mhi_result->buf_addr;
+	struct mhi_buf *mhi_buf = &netbuf->mhi_buf;
+	struct sk_buff *skb;
+	struct net_device *ndev = mhi_netdev->ndev;
+	struct device *dev = mhi_dev->dev.parent;
+	struct mhi_net_chain *chain = mhi_netdev->chain;
+
+	netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, DMA_FROM_DEVICE);
+
+	/* modem is down, drop the buffer before it can be recycled */
+	if (mhi_result->transaction_status == -ENOTCONN) {
+		__free_pages(mhi_buf->page, mhi_netdev->order);
+		return;
+	}
+
+	/* recycled buffers go back to the pool for requeueing by napi poll */
+	if (likely(netbuf->recycle))
+		list_add_tail(&mhi_buf->node, mhi_netdev->recycle_pool);
+
+	ndev->stats.rx_packets++;
+	ndev->stats.rx_bytes += mhi_result->bytes_xferd;
+
+	if (unlikely(!chain)) {
+		mhi_netdev_push_skb(mhi_netdev, mhi_buf, mhi_result);
+		return;
+	}
+
+	/* chaining supported: aggregate fragments into one skb via frag_list */
+	skb = alloc_skb(0, GFP_ATOMIC);
+	if (likely(skb)) {
+		skb_add_rx_frag(skb, 0, mhi_buf->page, 0,
+				mhi_result->bytes_xferd, mhi_netdev->mru);
+		/* this is first on list */
+		if (!chain->head) {
+			skb->dev = ndev;
+			skb->protocol =
+				mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf);
+			chain->head = skb;
+		} else {
+			skb_shinfo(chain->tail)->frag_list = skb;
+		}
+
+		chain->tail = skb;
+	} else {
+		__free_pages(mhi_buf->page, mhi_netdev->order);
+	}
+}
+
+static void mhi_netdev_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb)
+{
+	struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev);
+
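+	/* PENDING_DATA is the only event of interest; kick napi to drain */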
+	if (mhi_cb != MHI_CB_PENDING_DATA)
+		return;
+
+	napi_schedule(mhi_netdev->napi);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *dentry;
+
+static int mhi_netdev_debugfs_stats_show(struct seq_file *m, void *d)
+{
+	struct mhi_netdev *mhi_netdev = m->private;
+
+	seq_printf(m,
+		   "mru:%u order:%u pool_size:%d bg_pool_size:%d bg_pool_limit:%d abuf:%u kbuf:%u rbuf:%u\n",
+		   mhi_netdev->mru, mhi_netdev->order, mhi_netdev->pool_size,
+		   mhi_netdev->bg_pool_size, mhi_netdev->bg_pool_limit,
+		   mhi_netdev->abuffers, mhi_netdev->kbuffers,
+		   mhi_netdev->rbuffers);
+
+	return 0;
+}
+
+static int mhi_netdev_debugfs_stats_open(struct inode *inode, struct file *fp)
+{
+	return single_open(fp, mhi_netdev_debugfs_stats_show, inode->i_private);
+}
+
+static const struct file_operations debugfs_stats = {
+	.open = mhi_netdev_debugfs_stats_open,
+	.release = single_release,
+	.read = seq_read,
+};
+
+static int mhi_netdev_debugfs_chain(void *data, u64 val)
+{
+	struct mhi_netdev *mhi_netdev = data;
+	struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev;
+
+	mhi_netdev->chain = NULL;
+
+	if (rsc_dev)
+		rsc_dev->chain = NULL;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(debugfs_chain, NULL,
+			 mhi_netdev_debugfs_chain, "%llu\n");
+
+static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
+{
+	char node_name[32];
+	struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+
+	/* both the TX and RX client handles contain the same device info */
+	snprintf(node_name, sizeof(node_name), "%s_%04x_%02u.%02u.%02u_%u",
+		 mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain,
+		 mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias);
+
+	if (IS_ERR_OR_NULL(dentry))
+		return;
+
+	mhi_netdev->dentry = debugfs_create_dir(node_name, dentry);
+	if (IS_ERR_OR_NULL(mhi_netdev->dentry))
+		return;
+
+	debugfs_create_file_unsafe("stats", 0444, mhi_netdev->dentry,
+				   mhi_netdev, &debugfs_stats);
+	debugfs_create_file_unsafe("chain", 0200, mhi_netdev->dentry,
+				   mhi_netdev, &debugfs_chain);
+}
+
+static void mhi_netdev_create_debugfs_dir(void)
+{
+	dentry = debugfs_create_dir(MHI_NETDEV_DRIVER_NAME, NULL);
+}
+
+#else
+
+static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
+{
+}
+
+static void mhi_netdev_create_debugfs_dir(void)
+{
+}
+
+#endif
+
+static void mhi_netdev_remove(struct mhi_device *mhi_dev)
+{
+	struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev);
+
+	MSG_LOG("Remove notification received\n");
+
+	/* the rsc parent takes care of the cleanup */
+	if (mhi_netdev->is_rsc_dev) {
+		mhi_netdev_free_pool(mhi_netdev);
+		return;
+	}
+
+	kthread_stop(mhi_netdev->alloc_task);
+	netif_stop_queue(mhi_netdev->ndev);
+	napi_disable(mhi_netdev->napi);
+	unregister_netdev(mhi_netdev->ndev);
+	netif_napi_del(mhi_netdev->napi);
+	free_netdev(mhi_netdev->ndev);
+
+	if (!IS_ERR_OR_NULL(mhi_netdev->dentry))
+		debugfs_remove_recursive(mhi_netdev->dentry);
+}
+
+static int mhi_netdev_match(struct device *dev, void *data)
+{
+	/* if phandle dt == device dt, we found a match */
+	return (dev->of_node == data);
+}
+
+static void mhi_netdev_clone_dev(struct mhi_netdev *mhi_netdev,
+				 struct mhi_netdev *parent)
+{
+	mhi_netdev->ndev = parent->ndev;
+	mhi_netdev->napi = parent->napi;
+	mhi_netdev->ipc_log = parent->ipc_log;
+	mhi_netdev->msg_lvl = parent->msg_lvl;
+	mhi_netdev->ipc_log_lvl = parent->ipc_log_lvl;
+	mhi_netdev->is_rsc_dev = true;
+	mhi_netdev->chain = parent->chain;
+	mhi_netdev->rsc_parent = parent;
+	mhi_netdev->recycle_pool = parent->recycle_pool;
+	mhi_netdev->bg_pool = parent->bg_pool;
+}
+
+static int mhi_netdev_probe(struct mhi_device *mhi_dev,
+			    const struct mhi_device_id *id)
+{
+	int ret;
+	struct mhi_netdev *mhi_netdev, *p_netdev = NULL;
+	struct device_node *of_node = mhi_dev->dev.of_node;
+	int nr_tre;
+	char node_name[32];
+	struct device_node *phandle;
+	bool no_chain;
+
+	if (!of_node)
+		return -ENODEV;
+
+	mhi_netdev = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_netdev),
+				  GFP_KERNEL);
+	if (!mhi_netdev)
+		return -ENOMEM;
+
+	/* move mhi channels to start state */
+	ret = mhi_prepare_for_transfer(mhi_dev);
+	if (ret) {
+		MSG_ERR("Failed to start channels ret %d\n", ret);
+		return ret;
+	}
+
+	mhi_netdev->mhi_dev = mhi_dev;
+	mhi_device_set_devdata(mhi_dev, mhi_netdev);
+
+	ret = of_property_read_u32(of_node, "mhi,mru", &mhi_netdev->mru);
+	if (ret)
+		return -ENODEV;
+
+	/* MRU must be a power-of-two multiple of the page size */
+	mhi_netdev->order = __ilog2_u32(mhi_netdev->mru / PAGE_SIZE);
+	if ((PAGE_SIZE << mhi_netdev->order) < mhi_netdev->mru)
+		return -EINVAL;
+
+	/* check if this device is shared with a parent device */
+	phandle = of_parse_phandle(of_node, "mhi,rsc-parent", 0);
+	if (phandle) {
+		struct device *dev;
+		struct mhi_device *pdev;
+		/* find the parent device */
+		dev = driver_find_device(mhi_dev->dev.driver, NULL, phandle,
+					 mhi_netdev_match);
+		if (!dev)
+			return -ENODEV;
+
+		/* this device is shared with a parent device, so we won't
+		 * create a new network interface; clone the parent's
+		 * information into this child node
+		 */
+		pdev = to_mhi_device(dev);
+		p_netdev = mhi_device_get_devdata(pdev);
+		mhi_netdev_clone_dev(mhi_netdev, p_netdev);
+		put_device(dev);
+	} else {
+		mhi_netdev->msg_lvl = MHI_MSG_LVL_ERROR;
+		no_chain = of_property_read_bool(of_node,
+						 "mhi,disable-chain-skb");
+		if (!no_chain) {
+			mhi_netdev->chain = devm_kzalloc(&mhi_dev->dev,
+						sizeof(*mhi_netdev->chain),
+						GFP_KERNEL);
+			if (!mhi_netdev->chain)
+				return -ENOMEM;
+		}
+
+		ret = mhi_netdev_enable_iface(mhi_netdev);
+		if (ret)
+			return ret;
+
+		/*
+		 * set the pool size to ~2x the ring length, rounded up to
+		 * a power of two
+		 */
+		nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+		mhi_netdev->pool_size = 1 << __ilog2_u32(nr_tre);
+		if (nr_tre > mhi_netdev->pool_size)
+			mhi_netdev->pool_size <<= 1;
+		mhi_netdev->pool_size <<= 1;
+
+		/* if we expect child device to share then double the pool */
+		if (of_parse_phandle(of_node, "mhi,rsc-child", 0))
+			mhi_netdev->pool_size <<= 1;
+
+		/* allocate memory pool */
+		ret = mhi_netdev_alloc_pool(mhi_netdev);
+		if (ret)
+			return -ENOMEM;
+
+		/* create a background task to allocate memory */
+		mhi_netdev->bg_pool = kmalloc(sizeof(*mhi_netdev->bg_pool),
+					      GFP_KERNEL);
+		if (!mhi_netdev->bg_pool)
+			return -ENOMEM;
+
+		init_waitqueue_head(&mhi_netdev->alloc_event);
+		INIT_LIST_HEAD(mhi_netdev->bg_pool);
+		spin_lock_init(&mhi_netdev->bg_lock);
+		mhi_netdev->bg_pool_limit = mhi_netdev->pool_size / 4;
+		mhi_netdev->alloc_task = kthread_run(mhi_netdev_alloc_thread,
+						     mhi_netdev,
+						     mhi_netdev->ndev->name);
+		if (IS_ERR(mhi_netdev->alloc_task))
+			return PTR_ERR(mhi_netdev->alloc_task);
+
+		/* create ipc log buffer */
+		snprintf(node_name, sizeof(node_name),
+			 "%s_%04x_%02u.%02u.%02u_%u",
+			 mhi_netdev->interface_name, mhi_dev->dev_id,
+			 mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
+			 mhi_netdev->alias);
+		mhi_netdev->ipc_log = ipc_log_context_create(IPC_LOG_PAGES,
+							     node_name, 0);
+		mhi_netdev->ipc_log_lvl = IPC_LOG_LVL;
+
+		mhi_netdev_create_debugfs(mhi_netdev);
+	}
+
+	/* link the child node to its parent if this is a child device */
+	if (p_netdev)
+		p_netdev->rsc_dev = mhi_netdev;
+
+	/* now we have a pool of buffers allocated, queue to hardware
+	 * by triggering a napi_poll
+	 */
+	napi_schedule(mhi_netdev->napi);
+
+	return 0;
+}
+
+static const struct mhi_device_id mhi_netdev_match_table[] = {
+	{ .chan = "IP_HW0" },
+	{ .chan = "IP_HW0_RSC" },
+	{},
+};
+
+static struct mhi_driver mhi_netdev_driver = {
+	.id_table = mhi_netdev_match_table,
+	.probe = mhi_netdev_probe,
+	.remove = mhi_netdev_remove,
+	.ul_xfer_cb = mhi_netdev_xfer_ul_cb,
+	.dl_xfer_cb = mhi_netdev_xfer_dl_cb,
+	.status_cb = mhi_netdev_status_cb,
+	.driver = {
+		.name = "mhi_netdev",
+		.owner = THIS_MODULE,
+	}
+};
+
+static int __init mhi_netdev_init(void)
+{
+	BUILD_BUG_ON(sizeof(struct mhi_netbuf) > MAX_NETBUF_SIZE);
+	mhi_netdev_create_debugfs_dir();
+
+	return mhi_driver_register(&mhi_netdev_driver);
+}
+module_init(mhi_netdev_init);
+
+MODULE_DESCRIPTION("MHI NETDEV Network Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c
new file mode 100644
index 0000000..1a8535a
--- /dev/null
+++ b/drivers/bus/mhi/devices/mhi_uci.c
@@ -0,0 +1,741 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/termios.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/mhi.h>
+
+#define DEVICE_NAME "mhi"
+#define MHI_UCI_DRIVER_NAME "mhi_uci"
+
+struct uci_chan {
+	wait_queue_head_t wq;
+	spinlock_t lock;
+	struct list_head pending; /* user space waiting to read */
+	struct uci_buf *cur_buf; /* current buffer user space reading */
+	size_t rx_size;
+};
+
+struct uci_buf {
+	void *data;
+	size_t len;
+	struct list_head node;
+};
+
+struct uci_dev {
+	struct list_head node;
+	dev_t devt;
+	struct device *dev;
+	struct mhi_device *mhi_dev;
+	const char *chan;
+	struct mutex mutex; /* sync open and close */
+	struct uci_chan ul_chan;
+	struct uci_chan dl_chan;
+	size_t mtu;
+	int ref_count;
+	bool enabled;
+	u32 tiocm;
+	void *ipc_log;
+};
+
+struct mhi_uci_drv {
+	struct list_head head;
+	struct mutex lock;
+	struct class *class;
+	int major;
+	dev_t dev_t;
+};
+
+enum MHI_DEBUG_LEVEL msg_lvl = MHI_MSG_LVL_ERROR;
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define IPC_LOG_LVL (MHI_MSG_LVL_VERBOSE)
+#define MHI_UCI_IPC_LOG_PAGES (25)
+
+#else
+
+#define IPC_LOG_LVL (MHI_MSG_LVL_ERROR)
+#define MHI_UCI_IPC_LOG_PAGES (1)
+
+#endif
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define MSG_VERB(fmt, ...) do { \
+		if (msg_lvl <= MHI_MSG_LVL_VERBOSE) \
+			pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \
+		if (uci_dev->ipc_log && (IPC_LOG_LVL <= MHI_MSG_LVL_VERBOSE)) \
+			ipc_log_string(uci_dev->ipc_log, "[D][%s] " fmt, \
+				       __func__, ##__VA_ARGS__); \
+	} while (0)
+
+#else
+
+#define MSG_VERB(fmt, ...)
+
+#endif
+
+#define MSG_LOG(fmt, ...) do { \
+		if (msg_lvl <= MHI_MSG_LVL_INFO) \
+			pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \
+		if (uci_dev->ipc_log && (IPC_LOG_LVL <= MHI_MSG_LVL_INFO)) \
+			ipc_log_string(uci_dev->ipc_log, "[I][%s] " fmt, \
+				       __func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define MSG_ERR(fmt, ...) do { \
+		if (msg_lvl <= MHI_MSG_LVL_ERROR) \
+			pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
+		if (uci_dev->ipc_log && (IPC_LOG_LVL <= MHI_MSG_LVL_ERROR)) \
+			ipc_log_string(uci_dev->ipc_log, "[E][%s] " fmt, \
+				       __func__, ##__VA_ARGS__); \
+	} while (0)
+
+#define MAX_UCI_DEVICES (64)
+
+static DECLARE_BITMAP(uci_minors, MAX_UCI_DEVICES);
+static struct mhi_uci_drv mhi_uci_drv;
+
+static int mhi_queue_inbound(struct uci_dev *uci_dev)
+{
+	struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+	int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+	size_t mtu = uci_dev->mtu;
+	void *buf;
+	struct uci_buf *uci_buf;
+	int ret = -EIO, i;
+
+	for (i = 0; i < nr_trbs; i++) {
+		buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+
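+		/*
+		 * The uci_buf bookkeeping struct lives at the tail of the
+		 * allocation, right after the mtu bytes of payload, so a
+		 * single kmalloc covers both the data and its descriptor.
+		 */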
+		uci_buf = buf + mtu;
+		uci_buf->data = buf;
+
+		MSG_VERB("Allocated buf %d of %d size %zx\n", i, nr_trbs, mtu);
+
+		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
+					 MHI_EOT);
+		if (ret) {
+			kfree(buf);
+			MSG_ERR("Failed to queue buffer %d\n", i);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static long mhi_uci_ioctl(struct file *file,
+			  unsigned int cmd,
+			  unsigned long arg)
+{
+	struct uci_dev *uci_dev = file->private_data;
+	struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+	struct uci_chan *uci_chan = &uci_dev->dl_chan;
+	long ret = -ERESTARTSYS;
+
+	mutex_lock(&uci_dev->mutex);
+
+	if (cmd == TIOCMGET) {
+		spin_lock_bh(&uci_chan->lock);
+		ret = uci_dev->tiocm;
+		uci_dev->tiocm = 0;
+		spin_unlock_bh(&uci_chan->lock);
+	} else if (uci_dev->enabled) {
+		ret = mhi_ioctl(mhi_dev, cmd, arg);
+	}
+
+	mutex_unlock(&uci_dev->mutex);
+
+	return ret;
+}
+
+static int mhi_uci_release(struct inode *inode, struct file *file)
+{
+	struct uci_dev *uci_dev = file->private_data;
+
+	mutex_lock(&uci_dev->mutex);
+	uci_dev->ref_count--;
+	if (!uci_dev->ref_count) {
+		struct uci_buf *itr, *tmp;
+		struct uci_chan *uci_chan;
+
+		MSG_LOG("Last client left, closing node\n");
+
+		if (uci_dev->enabled)
+			mhi_unprepare_from_transfer(uci_dev->mhi_dev);
+
+		/* clean inbound channel */
+		uci_chan = &uci_dev->dl_chan;
+		list_for_each_entry_safe(itr, tmp, &uci_chan->pending, node) {
+			list_del(&itr->node);
+			kfree(itr->data);
+		}
+		if (uci_chan->cur_buf)
+			kfree(uci_chan->cur_buf->data);
+
+		uci_chan->cur_buf = NULL;
+
+		if (!uci_dev->enabled) {
+			MSG_LOG("Node is deleted, freeing dev node\n");
+			mutex_unlock(&uci_dev->mutex);
+			mutex_destroy(&uci_dev->mutex);
+			clear_bit(MINOR(uci_dev->devt), uci_minors);
+			kfree(uci_dev);
+			return 0;
+		}
+	}
+
+	MSG_LOG("exit: ref_count:%d\n", uci_dev->ref_count);
+
+	mutex_unlock(&uci_dev->mutex);
+
+	return 0;
+}
+
+static unsigned int mhi_uci_poll(struct file *file, poll_table *wait)
+{
+	struct uci_dev *uci_dev = file->private_data;
+	struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+	struct uci_chan *uci_chan;
+	unsigned int mask = 0;
+
+	poll_wait(file, &uci_dev->dl_chan.wq, wait);
+	poll_wait(file, &uci_dev->ul_chan.wq, wait);
+
+	uci_chan = &uci_dev->dl_chan;
+	spin_lock_bh(&uci_chan->lock);
+	if (!uci_dev->enabled) {
+		mask = POLLERR;
+	} else {
+		if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
+			MSG_VERB("Client can read from node\n");
+			mask |= POLLIN | POLLRDNORM;
+		}
+
+		if (uci_dev->tiocm) {
+			MSG_VERB("Line status changed\n");
+			mask |= POLLPRI;
+		}
+	}
+	spin_unlock_bh(&uci_chan->lock);
+
+	uci_chan = &uci_dev->ul_chan;
+	spin_lock_bh(&uci_chan->lock);
+	if (!uci_dev->enabled) {
+		mask |= POLLERR;
+	} else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) {
+		MSG_VERB("Client can write to node\n");
+		mask |= POLLOUT | POLLWRNORM;
+	}
+	spin_unlock_bh(&uci_chan->lock);
+
+	MSG_LOG("Client attempted to poll, returning mask 0x%x\n", mask);
+
+	return mask;
+}
+
+static ssize_t mhi_uci_write(struct file *file,
+			     const char __user *buf,
+			     size_t count,
+			     loff_t *offp)
+{
+	struct uci_dev *uci_dev = file->private_data;
+	struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+	struct uci_chan *uci_chan = &uci_dev->ul_chan;
+	size_t bytes_xfered = 0;
+	int ret, nr_avail;
+
+	if (!buf || !count)
+		return -EINVAL;
+
+	/* confirm channel is active */
+	spin_lock_bh(&uci_chan->lock);
+	if (!uci_dev->enabled) {
+		spin_unlock_bh(&uci_chan->lock);
+		return -ERESTARTSYS;
+	}
+
+	MSG_VERB("Enter: to xfer:%zx bytes\n", count);
+
+	while (count) {
+		size_t xfer_size;
+		void *kbuf;
+		enum MHI_FLAGS flags;
+
+		spin_unlock_bh(&uci_chan->lock);
+
+		/* wait for free descriptors */
+		ret = wait_event_interruptible(uci_chan->wq,
+			(!uci_dev->enabled) ||
+			(nr_avail = mhi_get_no_free_descriptors(mhi_dev,
+							DMA_TO_DEVICE)) > 0);
+
+		if (ret == -ERESTARTSYS || !uci_dev->enabled) {
+			MSG_LOG("Exit signal caught for node or not enabled\n");
+			return -ERESTARTSYS;
+		}
+
+		xfer_size = min_t(size_t, count, uci_dev->mtu);
+		kbuf = kmalloc(xfer_size, GFP_KERNEL);
+		if (!kbuf) {
+			MSG_ERR("Failed to allocate memory %zx\n", xfer_size);
+			return -ENOMEM;
+		}
+
+		if (unlikely(copy_from_user(kbuf, buf, xfer_size))) {
+			kfree(kbuf);
+			return -EFAULT;
+		}
+
+		spin_lock_bh(&uci_chan->lock);
+
+		/* chain TREs while more data remains and the ring will not
+		 * fill up after this transfer; otherwise force EOT
+		 */
+		if (nr_avail > 1 && (count - xfer_size))
+			flags = MHI_CHAIN;
+		else
+			flags = MHI_EOT;
+
+		if (uci_dev->enabled)
+			ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, kbuf,
+						 xfer_size, flags);
+		else
+			ret = -ERESTARTSYS;
+
+		if (ret) {
+			kfree(kbuf);
+			goto sys_interrupt;
+		}
+
+		bytes_xfered += xfer_size;
+		count -= xfer_size;
+		buf += xfer_size;
+	}
+
+	spin_unlock_bh(&uci_chan->lock);
+	MSG_VERB("Exit: Number of bytes xferred:%zx\n", bytes_xfered);
+
+	return bytes_xfered;
+
+sys_interrupt:
+	spin_unlock_bh(&uci_chan->lock);
+
+	return ret;
+}
+
+static ssize_t mhi_uci_read(struct file *file,
+			    char __user *buf,
+			    size_t count,
+			    loff_t *ppos)
+{
+	struct uci_dev *uci_dev = file->private_data;
+	struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+	struct uci_chan *uci_chan = &uci_dev->dl_chan;
+	struct uci_buf *uci_buf;
+	char *ptr;
+	size_t to_copy;
+	int ret = 0;
+
+	if (!buf)
+		return -EINVAL;
+
+	MSG_VERB("Client provided buf len:%zx\n", count);
+
+	/* confirm channel is active */
+	spin_lock_bh(&uci_chan->lock);
+	if (!uci_dev->enabled) {
+		spin_unlock_bh(&uci_chan->lock);
+		return -ERESTARTSYS;
+	}
+
+	/* No data available to read, wait */
+	if (!uci_chan->cur_buf && list_empty(&uci_chan->pending)) {
+		MSG_VERB("No data available to read, waiting\n");
+
+		spin_unlock_bh(&uci_chan->lock);
+		ret = wait_event_interruptible(uci_chan->wq,
+				(!uci_dev->enabled ||
+				 !list_empty(&uci_chan->pending)));
+		if (ret == -ERESTARTSYS) {
+			MSG_LOG("Exit signal caught for node\n");
+			return -ERESTARTSYS;
+		}
+
+		spin_lock_bh(&uci_chan->lock);
+		if (!uci_dev->enabled) {
+			MSG_LOG("node is disabled\n");
+			ret = -ERESTARTSYS;
+			goto read_error;
+		}
+	}
+
+	/* new read, get the next descriptor from the list */
+	if (!uci_chan->cur_buf) {
+		uci_buf = list_first_entry_or_null(&uci_chan->pending,
+						   struct uci_buf, node);
+		if (unlikely(!uci_buf)) {
+			ret = -EIO;
+			goto read_error;
+		}
+
+		list_del(&uci_buf->node);
+		uci_chan->cur_buf = uci_buf;
+		uci_chan->rx_size = uci_buf->len;
+		MSG_VERB("Got pkt of size:%zu\n", uci_chan->rx_size);
+	}
+
+	uci_buf = uci_chan->cur_buf;
+	spin_unlock_bh(&uci_chan->lock);
+
+	/* copy to user space, resuming where a previous partial read of
+	 * this buffer left off (rx_size tracks the unread bytes)
+	 */
+	to_copy = min_t(size_t, count, uci_chan->rx_size);
+	ptr = uci_buf->data + (uci_buf->len - uci_chan->rx_size);
+	if (copy_to_user(buf, ptr, to_copy))
+		return -EFAULT;
+
+	MSG_VERB("Copied %zx of %zx bytes\n", to_copy, uci_chan->rx_size);
+	uci_chan->rx_size -= to_copy;
+
+	/* we finished with this buffer, queue it back to hardware */
+	if (!uci_chan->rx_size) {
+		spin_lock_bh(&uci_chan->lock);
+		uci_chan->cur_buf = NULL;
+
+		if (uci_dev->enabled)
+			ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
+						 uci_buf->data, uci_dev->mtu,
+						 MHI_EOT);
+		else
+			ret = -ERESTARTSYS;
+
+		if (ret) {
+			MSG_ERR("Failed to recycle element\n");
+			kfree(uci_buf->data);
+			goto read_error;
+		}
+
+		spin_unlock_bh(&uci_chan->lock);
+	}
+
+	MSG_VERB("Returning %zx bytes\n", to_copy);
+
+	return to_copy;
+
+read_error:
+	spin_unlock_bh(&uci_chan->lock);
+
+	return ret;
+}
+
+static int mhi_uci_open(struct inode *inode, struct file *filp)
+{
+	struct uci_dev *uci_dev = NULL, *tmp_dev;
+	int ret = -EIO;
+	struct uci_buf *buf_itr, *tmp;
+	struct uci_chan *dl_chan;
+
+	mutex_lock(&mhi_uci_drv.lock);
+	list_for_each_entry(tmp_dev, &mhi_uci_drv.head, node) {
+		if (tmp_dev->devt == inode->i_rdev) {
+			uci_dev = tmp_dev;
+			break;
+		}
+	}
+
+	/* could not find a minor node */
+	if (!uci_dev)
+		goto error_exit;
+
+	mutex_lock(&uci_dev->mutex);
+	if (!uci_dev->enabled) {
+		MSG_ERR("Node exists, but not in active state!\n");
+		goto error_open_chan;
+	}
+
+	uci_dev->ref_count++;
+
+	MSG_LOG("Node open, ref counts %u\n", uci_dev->ref_count);
+
+	if (uci_dev->ref_count == 1) {
+		MSG_LOG("Starting channel\n");
+		ret = mhi_prepare_for_transfer(uci_dev->mhi_dev);
+		if (ret) {
+			MSG_ERR("Error starting transfer channels\n");
+			uci_dev->ref_count--;
+			goto error_open_chan;
+		}
+
+		ret = mhi_queue_inbound(uci_dev);
+		if (ret)
+			goto error_rx_queue;
+	}
+
+	filp->private_data = uci_dev;
+	mutex_unlock(&uci_dev->mutex);
+	mutex_unlock(&mhi_uci_drv.lock);
+
+	return 0;
+
+ error_rx_queue:
+	dl_chan = &uci_dev->dl_chan;
+	mhi_unprepare_from_transfer(uci_dev->mhi_dev);
+	list_for_each_entry_safe(buf_itr, tmp, &dl_chan->pending, node) {
+		list_del(&buf_itr->node);
+		kfree(buf_itr->data);
+	}
+
+ error_open_chan:
+	mutex_unlock(&uci_dev->mutex);
+
+error_exit:
+	mutex_unlock(&mhi_uci_drv.lock);
+
+	return ret;
+}
+
+static const struct file_operations mhidev_fops = {
+	.open = mhi_uci_open,
+	.release = mhi_uci_release,
+	.read = mhi_uci_read,
+	.write = mhi_uci_write,
+	.poll = mhi_uci_poll,
+	.unlocked_ioctl = mhi_uci_ioctl,
+};
+
+static void mhi_uci_remove(struct mhi_device *mhi_dev)
+{
+	struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+
+	MSG_LOG("Enter\n");
+
+	mutex_lock(&mhi_uci_drv.lock);
+	mutex_lock(&uci_dev->mutex);
+
+	/* disable the node */
+	spin_lock_irq(&uci_dev->dl_chan.lock);
+	spin_lock_irq(&uci_dev->ul_chan.lock);
+	uci_dev->enabled = false;
+	spin_unlock_irq(&uci_dev->ul_chan.lock);
+	spin_unlock_irq(&uci_dev->dl_chan.lock);
+	wake_up(&uci_dev->dl_chan.wq);
+	wake_up(&uci_dev->ul_chan.wq);
+
+	/* delete the node to prevent new opens */
+	device_destroy(mhi_uci_drv.class, uci_dev->devt);
+	uci_dev->dev = NULL;
+	list_del(&uci_dev->node);
+
+	/* safe to free memory only if all file nodes are closed */
+	if (!uci_dev->ref_count) {
+		mutex_unlock(&uci_dev->mutex);
+		mutex_destroy(&uci_dev->mutex);
+		clear_bit(MINOR(uci_dev->devt), uci_minors);
+		kfree(uci_dev);
+		mutex_unlock(&mhi_uci_drv.lock);
+		return;
+	}
+
+	MSG_LOG("Exit\n");
+	mutex_unlock(&uci_dev->mutex);
+	mutex_unlock(&mhi_uci_drv.lock);
+}
+
+static int mhi_uci_probe(struct mhi_device *mhi_dev,
+			 const struct mhi_device_id *id)
+{
+	struct uci_dev *uci_dev;
+	int minor;
+	char node_name[32];
+	int dir;
+
+	uci_dev = kzalloc(sizeof(*uci_dev), GFP_KERNEL);
+	if (!uci_dev)
+		return -ENOMEM;
+
+	mutex_init(&uci_dev->mutex);
+	uci_dev->mhi_dev = mhi_dev;
+
+	minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES);
+	if (minor >= MAX_UCI_DEVICES) {
+		kfree(uci_dev);
+		return -ENOSPC;
+	}
+
+	mutex_lock(&uci_dev->mutex);
+	mutex_lock(&mhi_uci_drv.lock);
+
+	uci_dev->devt = MKDEV(mhi_uci_drv.major, minor);
+	uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
+				     uci_dev->devt, uci_dev,
+				     DEVICE_NAME "_%04x_%02u.%02u.%02u%s%d",
+				     mhi_dev->dev_id, mhi_dev->domain,
+				     mhi_dev->bus, mhi_dev->slot, "_pipe_",
+				     mhi_dev->ul_chan_id);
+	set_bit(minor, uci_minors);
+
+	/* create debugging buffer */
+	snprintf(node_name, sizeof(node_name), "mhi_uci_%04x_%02u.%02u.%02u_%d",
+		 mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
+		 mhi_dev->ul_chan_id);
+	uci_dev->ipc_log = ipc_log_context_create(MHI_UCI_IPC_LOG_PAGES,
+						  node_name, 0);
+
+	for (dir = 0; dir < 2; dir++) {
+		struct uci_chan *uci_chan = (dir) ?
+			&uci_dev->ul_chan : &uci_dev->dl_chan;
+		spin_lock_init(&uci_chan->lock);
+		init_waitqueue_head(&uci_chan->wq);
+		INIT_LIST_HEAD(&uci_chan->pending);
+	}
+
+	uci_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu);
+	mhi_device_set_devdata(mhi_dev, uci_dev);
+	uci_dev->enabled = true;
+
+	list_add(&uci_dev->node, &mhi_uci_drv.head);
+	mutex_unlock(&mhi_uci_drv.lock);
+	mutex_unlock(&uci_dev->mutex);
+
+	MSG_LOG("channel:%s successfully probed\n", mhi_dev->chan_name);
+
+	return 0;
+}
+
+static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
+			   struct mhi_result *mhi_result)
+{
+	struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+	struct uci_chan *uci_chan = &uci_dev->ul_chan;
+
+	MSG_VERB("status:%d xfer_len:%zu\n", mhi_result->transaction_status,
+		 mhi_result->bytes_xferd);
+
+	kfree(mhi_result->buf_addr);
+	if (!mhi_result->transaction_status)
+		wake_up(&uci_chan->wq);
+}
+
+static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
+			   struct mhi_result *mhi_result)
+{
+	struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+	struct uci_chan *uci_chan = &uci_dev->dl_chan;
+	unsigned long flags;
+	struct uci_buf *buf;
+
+	MSG_VERB("status:%d receive_len:%zu\n", mhi_result->transaction_status,
+		 mhi_result->bytes_xferd);
+
+	if (mhi_result->transaction_status == -ENOTCONN) {
+		kfree(mhi_result->buf_addr);
+		return;
+	}
+
+	spin_lock_irqsave(&uci_chan->lock, flags);
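+	/* the uci_buf header sits past the mtu bytes of payload, at the
+	 * tail of the buffer (see mhi_queue_inbound)
+	 */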
+	buf = mhi_result->buf_addr + uci_dev->mtu;
+	buf->data = mhi_result->buf_addr;
+	buf->len = mhi_result->bytes_xferd;
+	list_add_tail(&buf->node, &uci_chan->pending);
+	spin_unlock_irqrestore(&uci_chan->lock, flags);
+
+	if (mhi_dev->dev.power.wakeup)
+		__pm_wakeup_event(mhi_dev->dev.power.wakeup, 0);
+
+	wake_up(&uci_chan->wq);
+}
+
+static void mhi_status_cb(struct mhi_device *mhi_dev, enum MHI_CB reason)
+{
+	struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+	struct uci_chan *uci_chan = &uci_dev->dl_chan;
+	unsigned long flags;
+
+	if (reason == MHI_CB_DTR_SIGNAL) {
+		spin_lock_irqsave(&uci_chan->lock, flags);
+		uci_dev->tiocm = mhi_dev->tiocm;
+		spin_unlock_irqrestore(&uci_chan->lock, flags);
+		wake_up(&uci_chan->wq);
+	}
+}
+
+/* .driver_data stores max mtu */
+static const struct mhi_device_id mhi_uci_match_table[] = {
+	{ .chan = "LOOPBACK", .driver_data = 0x1000 },
+	{ .chan = "SAHARA", .driver_data = 0x8000 },
+	{ .chan = "EFS", .driver_data = 0x1000 },
+	{ .chan = "QMI0", .driver_data = 0x1000 },
+	{ .chan = "QMI1", .driver_data = 0x1000 },
+	{ .chan = "TF", .driver_data = 0x1000 },
+	{ .chan = "DUN", .driver_data = 0x1000 },
+	{},
+};
+
+static struct mhi_driver mhi_uci_driver = {
+	.id_table = mhi_uci_match_table,
+	.remove = mhi_uci_remove,
+	.probe = mhi_uci_probe,
+	.ul_xfer_cb = mhi_ul_xfer_cb,
+	.dl_xfer_cb = mhi_dl_xfer_cb,
+	.status_cb = mhi_status_cb,
+	.driver = {
+		.name = MHI_UCI_DRIVER_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int mhi_uci_init(void)
+{
+	int ret;
+
+	ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops);
+	if (ret < 0)
+		return ret;
+
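+	/* passing major 0 to register_chrdev() allocates a major dynamically */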
+	mhi_uci_drv.major = ret;
+	mhi_uci_drv.class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME);
+	if (IS_ERR(mhi_uci_drv.class)) {
+		unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
+		return -ENODEV;
+	}
+
+	mutex_init(&mhi_uci_drv.lock);
+	INIT_LIST_HEAD(&mhi_uci_drv.head);
+
+	ret = mhi_driver_register(&mhi_uci_driver);
+	if (ret)
+		class_destroy(mhi_uci_drv.class);
+
+	return ret;
+}
+
+module_init(mhi_uci_init);
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("MHI_UCI");
+MODULE_DESCRIPTION("MHI UCI Driver");
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 676ffe8..9033c23 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2722,6 +2722,7 @@
 	VERIFY(err, (fl && ud));
 	if (err)
 		goto bail;
+	mutex_lock(&fl->map_mutex);
 	mutex_lock(&fl->fl_map_mutex);
 	if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
 		pr_err("adsprpc: mapping not found to unmap %d va %llx %x\n",
@@ -2729,11 +2730,13 @@
 			(unsigned int)ud->len);
 		err = -1;
 		mutex_unlock(&fl->fl_map_mutex);
+		mutex_unlock(&fl->map_mutex);
 		goto bail;
 	}
 	if (map)
 		fastrpc_mmap_free(map, 0);
 	mutex_unlock(&fl->fl_map_mutex);
+	mutex_unlock(&fl->map_mutex);
 bail:
 	return err;
 }
diff --git a/drivers/clk/msm/clock-cpu-8939.c b/drivers/clk/msm/clock-cpu-8939.c
index 5b8b8f3..1bb7e5d 100644
--- a/drivers/clk/msm/clock-cpu-8939.c
+++ b/drivers/clk/msm/clock-cpu-8939.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, 2018-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1081,7 +1081,7 @@
 
 	/* Wait for update to take effect */
 	for (count = 500; count > 0; count--) {
-		if (!(readl_relaxed(base)) & BIT(0))
+		if (!(readl_relaxed(base) & BIT(0)))
 			break;
 		udelay(1);
 	}
diff --git a/drivers/clk/msm/clock-cpu-sdm632.c b/drivers/clk/msm/clock-cpu-sdm632.c
index 58a2520..dbba36f 100644
--- a/drivers/clk/msm/clock-cpu-sdm632.c
+++ b/drivers/clk/msm/clock-cpu-sdm632.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1104,7 +1104,7 @@
 
 	/* Wait for update to take effect */
 	for (count = 500; count > 0; count--) {
-		if (!(readl_relaxed(base)) & BIT(0))
+		if (!(readl_relaxed(base) & BIT(0)))
 			break;
 		udelay(1);
 	}
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index fdf2888..88d061d 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
  * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
  * Copyright (C) 2009 Intel Corporation
  *
@@ -1314,7 +1314,7 @@
 
 	if (!idx) {
 		stop_critical_timings();
-		wfi();
+		cpu_do_idle();
 		start_critical_timings();
 		return 1;
 	}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2f936a7..78591ea 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -83,6 +83,16 @@
 	  default case is N. Details and instructions how to build your own
 	  EDID data are given in Documentation/EDID/HOWTO.txt.
 
+config DRM_DP_CEC
+	bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
+	select CEC_CORE
+	help
+	  Choose this option if you want to enable HDMI CEC support for
+	  DisplayPort/USB-C to HDMI adapters.
+
+	  Note: not all adapters support this feature, and even for those
+	  that do support this they often do not hook up the CEC pin.
+
 config DRM_TTM
 	tristate
 	depends on DRM
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 25c7204..4a7766c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -33,6 +33,7 @@
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
+drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
new file mode 100644
index 0000000..ddb1c5a
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DisplayPort CEC-Tunneling-over-AUX support
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <drm/drm_dp_helper.h>
+#include <media/cec.h>
+
+/*
+ * Unfortunately it turns out that we have a chicken-and-egg situation
+ * here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
+ * have a converter chip that supports CEC-Tunneling-over-AUX (usually the
+ * Parade PS176), but they do not wire up the CEC pin, thus making CEC
+ * useless.
+ *
+ * Sadly there is no way for this driver to know this. What happens is
+ * that a /dev/cecX device is created that is isolated and unable to see
+ * any of the other CEC devices. Quite literally the CEC wire is cut
+ * (or in this case, never connected in the first place).
+ *
+ * The reason so few adapters support this is that this tunneling protocol
+ * was never supported by any OS. So there was no easy way of testing it,
+ * and no incentive to correctly wire up the CEC pin.
+ *
+ * Hopefully by creating this driver it will be easier for vendors to
+ * finally fix their adapters and test the CEC functionality.
+ *
+ * I keep a list of known working adapters here:
+ *
+ * https://hverkuil.home.xs4all.nl/cec-status.txt
+ *
+ * Please mail me (hverkuil@xs4all.nl) if you find an adapter that works
+ * and is not yet listed there.
+ *
+ * Note that the current implementation does not support CEC over an MST hub.
+ * As far as I can see there is no mechanism defined in the DisplayPort
+ * standard to transport CEC interrupts over an MST device. It might be
+ * possible to do this through polling, but I have not been able to get that
+ * to work.
+ */
+
+/**
+ * DOC: dp cec helpers
+ *
+ * These functions take care of supporting the CEC-Tunneling-over-AUX
+ * feature of DisplayPort-to-HDMI adapters.
+ */
+
+/*
+ * When the EDID is unset because the HPD went low, then the CEC DPCD registers
+ * typically can no longer be read (true for a DP-to-HDMI adapter since it is
+ * powered by the HPD). However, some displays toggle the HPD off and on for a
+ * short period for one reason or another, and that would cause the CEC adapter
+ * to be removed and added again, even though nothing else changed.
+ *
+ * This module parameter sets a delay in seconds before the CEC adapter is
+ * actually unregistered. Only if the HPD does not return within that time will
+ * the CEC adapter be unregistered.
+ *
+ * If it is set to a value >= NEVER_UNREG_DELAY, then the CEC adapter will never
+ * be unregistered for as long as the connector remains registered.
+ *
+ * If it is set to 0, then the CEC adapter will be unregistered immediately as
+ * soon as the HPD disappears.
+ *
+ * The default is one second to prevent short HPD glitches from unregistering
+ * the CEC adapter.
+ *
+ * Note that for integrated HDMI branch devices that support CEC the DPCD
+ * registers remain available even if the HPD goes low since it is not powered
+ * by the HPD. In that case the CEC adapter will never be unregistered during
+ * the life time of the connector. At least, this is the theory since I do not
+ * have hardware with an integrated HDMI branch device that supports CEC.
+ */
+#define NEVER_UNREG_DELAY 1000
+static unsigned int drm_dp_cec_unregister_delay = 1;
+module_param(drm_dp_cec_unregister_delay, uint, 0600);
+MODULE_PARM_DESC(drm_dp_cec_unregister_delay,
+		 "CEC unregister delay in seconds, 0: no delay, >= 1000: never unregister");
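+
+/*
+ * Usage sketch (the sysfs path assumes this file is built into the
+ * drm_kms_helper module, per the Makefile):
+ *   echo 5 > /sys/module/drm_kms_helper/parameters/drm_dp_cec_unregister_delay
+ * keeps the CEC adapter registered across HPD drops of up to 5 seconds.
+ */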
+
+static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+	struct drm_dp_aux *aux = cec_get_drvdata(adap);
+	u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
+	ssize_t err = 0;
+
+	err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
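+	/* a failed disable is harmless, so errors are only reported on enable */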
+	return (enable && err < 0) ? err : 0;
+}
+
+static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+	struct drm_dp_aux *aux = cec_get_drvdata(adap);
+	/* Bit 15 (logical address 15) should always be set */
+	u16 la_mask = 1 << CEC_LOG_ADDR_BROADCAST;
+	u8 mask[2];
+	ssize_t err;
+
+	if (addr != CEC_LOG_ADDR_INVALID)
+		la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
+	mask[0] = la_mask & 0xff;
+	mask[1] = la_mask >> 8;
+	err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
+	return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
+}
+
+static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+				    u32 signal_free_time, struct cec_msg *msg)
+{
+	struct drm_dp_aux *aux = cec_get_drvdata(adap);
+	unsigned int retries = min(5, attempts - 1);
+	ssize_t err;
+
+	err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
+				msg->msg, msg->len);
+	if (err < 0)
+		return err;
+
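+	/*
+	 * Message length (minus one), the retry count and the send bit
+	 * are packed into the single TX_MESSAGE_INFO DPCD register.
+	 */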
+	err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
+				 (msg->len - 1) | (retries << 4) |
+				 DP_CEC_TX_MESSAGE_SEND);
+	return err < 0 ? err : 0;
+}
+
+static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+					      bool enable)
+{
+	struct drm_dp_aux *aux = cec_get_drvdata(adap);
+	ssize_t err;
+	u8 val;
+
+	if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
+		return 0;
+
+	err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
+	if (err >= 0) {
+		if (enable)
+			val |= DP_CEC_SNOOPING_ENABLE;
+		else
+			val &= ~DP_CEC_SNOOPING_ENABLE;
+		err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+	}
+	return (enable && err < 0) ? err : 0;
+}
+
+static void drm_dp_cec_adap_status(struct cec_adapter *adap,
+				   struct seq_file *file)
+{
+	struct drm_dp_aux *aux = cec_get_drvdata(adap);
+	struct drm_dp_desc desc;
+	struct drm_dp_dpcd_ident *id = &desc.ident;
+
+	if (drm_dp_read_desc(aux, &desc, true))
+		return;
+	seq_printf(file, "OUI: %*phD\n",
+		   (int)sizeof(id->oui), id->oui);
+	seq_printf(file, "ID: %*pE\n",
+		   (int)strnlen(id->device_id, sizeof(id->device_id)),
+		   id->device_id);
+	seq_printf(file, "HW Rev: %d.%d\n", id->hw_rev >> 4, id->hw_rev & 0xf);
+	/*
+	 * Show this both in decimal and hex: at least one vendor
+	 * always reports this in hex.
+	 */
+	seq_printf(file, "FW/SW Rev: %d.%d (0x%02x.0x%02x)\n",
+		   id->sw_major_rev, id->sw_minor_rev,
+		   id->sw_major_rev, id->sw_minor_rev);
+}
+
+static const struct cec_adap_ops drm_dp_cec_adap_ops = {
+	.adap_enable = drm_dp_cec_adap_enable,
+	.adap_log_addr = drm_dp_cec_adap_log_addr,
+	.adap_transmit = drm_dp_cec_adap_transmit,
+	.adap_monitor_all_enable = drm_dp_cec_adap_monitor_all_enable,
+	.adap_status = drm_dp_cec_adap_status,
+};
+
+static int drm_dp_cec_received(struct drm_dp_aux *aux)
+{
+	struct cec_adapter *adap = aux->cec.adap;
+	struct cec_msg msg;
+	u8 rx_msg_info;
+	ssize_t err;
+
+	err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
+	if (err < 0)
+		return err;
+
+	if (!(rx_msg_info & DP_CEC_RX_MESSAGE_ENDED))
+		return 0;
+
+	msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
+	err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
+	if (err < 0)
+		return err;
+
+	cec_received_msg(adap, &msg);
+	return 0;
+}
+
+static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
+{
+	struct cec_adapter *adap = aux->cec.adap;
+	u8 flags;
+
+	if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
+		return;
+
+	if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
+		drm_dp_cec_received(aux);
+
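+	/* map the DPCD TX result flags onto CEC transmit status codes */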
+	if (flags & DP_CEC_TX_MESSAGE_SENT)
+		cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
+	else if (flags & DP_CEC_TX_LINE_ERROR)
+		cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR |
+						CEC_TX_STATUS_MAX_RETRIES);
+	else if (flags &
+		 (DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
+		cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
+						CEC_TX_STATUS_MAX_RETRIES);
+	drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
+}
+
+/**
+ * drm_dp_cec_irq() - handle CEC interrupt, if any
+ * @aux: DisplayPort AUX channel
+ *
+ * Should be called when handling an IRQ_HPD request. If CEC-tunneling-over-AUX
+ * is present, then it will check for a CEC_IRQ and handle it accordingly.
+ */
+void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+	u8 cec_irq;
+	int ret;
+
+	mutex_lock(&aux->cec.lock);
+	if (!aux->cec.adap)
+		goto unlock;
+
+	ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
+				&cec_irq);
+	if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
+		goto unlock;
+
+	drm_dp_cec_handle_irq(aux);
+	drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
+unlock:
+	mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_irq);
+
+static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
+{
+	u8 cap = 0;
+
+	if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
+	    !(cap & DP_CEC_TUNNELING_CAPABLE))
+		return false;
+	if (cec_cap)
+		*cec_cap = cap;
+	return true;
+}
+
+/*
+ * Called if the HPD was low for more than drm_dp_cec_unregister_delay
+ * seconds. This unregisters the CEC adapter.
+ */
+static void drm_dp_cec_unregister_work(struct work_struct *work)
+{
+	struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
+					      cec.unregister_work.work);
+
+	mutex_lock(&aux->cec.lock);
+	cec_unregister_adapter(aux->cec.adap);
+	aux->cec.adap = NULL;
+	mutex_unlock(&aux->cec.lock);
+}
+
+/*
+ * A new EDID is set. If there is no CEC adapter, then create one. If
+ * there was a CEC adapter, then check if the CEC adapter properties
+ * were unchanged and just update the CEC physical address. Otherwise
+ * unregister the old CEC adapter and create a new one.
+ */
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
+{
+	u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
+	unsigned int num_las = 1;
+	u8 cap;
+
+#ifndef CONFIG_MEDIA_CEC_RC
+	/*
+	 * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
+	 * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
+	 *
+	 * Do this here as well to ensure the tests against cec_caps are
+	 * correct.
+	 */
+	cec_caps &= ~CEC_CAP_RC;
+#endif
+	cancel_delayed_work_sync(&aux->cec.unregister_work);
+
+	mutex_lock(&aux->cec.lock);
+	if (!drm_dp_cec_cap(aux, &cap)) {
+		/* CEC is not supported, unregister any existing adapter */
+		cec_unregister_adapter(aux->cec.adap);
+		aux->cec.adap = NULL;
+		goto unlock;
+	}
+
+	if (cap & DP_CEC_SNOOPING_CAPABLE)
+		cec_caps |= CEC_CAP_MONITOR_ALL;
+	if (cap & DP_CEC_MULTIPLE_LA_CAPABLE)
+		num_las = CEC_MAX_LOG_ADDRS;
+
+	if (aux->cec.adap) {
+		if (aux->cec.adap->capabilities == cec_caps &&
+		    aux->cec.adap->available_log_addrs == num_las) {
+			/* Unchanged, so just set the phys addr */
+			cec_s_phys_addr_from_edid(aux->cec.adap, edid);
+			goto unlock;
+		}
+		/*
+		 * The capabilities changed, so unregister the old
+		 * adapter first.
+		 */
+		cec_unregister_adapter(aux->cec.adap);
+	}
+
+	/* Create a new adapter */
+	aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
+					     aux, aux->cec.name, cec_caps,
+					     num_las);
+	if (IS_ERR(aux->cec.adap)) {
+		aux->cec.adap = NULL;
+		goto unlock;
+	}
+	if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
+		cec_delete_adapter(aux->cec.adap);
+		aux->cec.adap = NULL;
+	} else {
+		/*
+		 * Update the phys addr for the new CEC adapter. When called
+		 * from drm_dp_cec_register_connector() edid == NULL, so in
+		 * that case the phys addr is just invalidated.
+		 */
+		cec_s_phys_addr_from_edid(aux->cec.adap, edid);
+	}
+unlock:
+	mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_set_edid);
+
+/*
+ * The EDID disappeared (likely because of the HPD going down).
+ */
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+	cancel_delayed_work_sync(&aux->cec.unregister_work);
+
+	mutex_lock(&aux->cec.lock);
+	if (!aux->cec.adap)
+		goto unlock;
+
+	cec_phys_addr_invalidate(aux->cec.adap);
+	/*
+	 * We're done if we want to keep the CEC device
+	 * (drm_dp_cec_unregister_delay is >= NEVER_UNREG_DELAY) or if the
+	 * DPCD still indicates the CEC capability (expected for an integrated
+	 * HDMI branch device).
+	 */
+	if (drm_dp_cec_unregister_delay < NEVER_UNREG_DELAY &&
+	    !drm_dp_cec_cap(aux, NULL)) {
+		/*
+		 * Unregister the CEC adapter after drm_dp_cec_unregister_delay
+		 * seconds. This to debounce short HPD off-and-on cycles from
+		 * displays.
+		 */
+		schedule_delayed_work(&aux->cec.unregister_work,
+				      drm_dp_cec_unregister_delay * HZ);
+	}
+unlock:
+	mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_unset_edid);
+
+/**
+ * drm_dp_cec_register_connector() - register a new connector
+ * @aux: DisplayPort AUX channel
+ * @name: name of the CEC device
+ * @parent: parent device
+ *
+ * A new connector was registered with associated CEC adapter name and
+ * CEC adapter parent device. After registering the name and parent
+ * drm_dp_cec_set_edid() is called to check if the connector supports
+ * CEC and to register a CEC adapter if that is the case.
+ */
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
+				   struct device *parent)
+{
+	WARN_ON(aux->cec.adap);
+	aux->cec.name = name;
+	aux->cec.parent = parent;
+	INIT_DELAYED_WORK(&aux->cec.unregister_work,
+			  drm_dp_cec_unregister_work);
+
+	drm_dp_cec_set_edid(aux, NULL);
+}
+EXPORT_SYMBOL(drm_dp_cec_register_connector);
+
+/**
+ * drm_dp_cec_unregister_connector() - unregister the CEC adapter, if any
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+	if (!aux->cec.adap)
+		return;
+	cancel_delayed_work_sync(&aux->cec.unregister_work);
+	cec_unregister_adapter(aux->cec.adap);
+	aux->cec.adap = NULL;
+}
+EXPORT_SYMBOL(drm_dp_cec_unregister_connector);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 876a2d9..dde0623 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -990,6 +990,7 @@
 void drm_dp_aux_init(struct drm_dp_aux *aux)
 {
 	mutex_init(&aux->hw_mutex);
+	mutex_init(&aux->cec.lock);
 
 	aux->ddc.algo = &drm_dp_i2c_algo;
 	aux->ddc.algo_data = aux;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 5899fcf..ace0fac 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -124,7 +124,7 @@
 	if (len > count)
 		len = count;
 
-	/* TODO: make sure that this does not exceed 4K */
+	len = min_t(size_t, len, SZ_4K);
 	if (copy_to_user(buff, buf, len)) {
 		kfree(buf);
 		return -EFAULT;
@@ -182,7 +182,7 @@
 	if (len > count)
 		len = count;
 
-	/* TODO: make sure that this does not exceed 4K */
+	len = min_t(size_t, len, SZ_4K);
 	if (copy_to_user(buff, buf, len)) {
 		kfree(buf);
 		return -EFAULT;
diff --git a/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h b/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h
index 9605ab2..7b4963d 100644
--- a/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h
+++ b/drivers/iio/imu/st_asm330lhh/st_asm330lhh.h
@@ -18,7 +18,7 @@
 #include <linux/slab.h>
 
 #define ST_ASM330LHH_REVISION		"2.0.1"
-#define ST_ASM330LHH_PATCH		"1"
+#define ST_ASM330LHH_PATCH		"2"
 
 #define ST_ASM330LHH_VERSION		"v"	\
 	ST_ASM330LHH_REVISION			\
@@ -220,7 +220,10 @@
 	u8 enable_mask;
 
 	s64 ts_offset;
+	u32 hw_val;
+	u32 hw_val_old;
 	s64 hw_ts;
+	s64 hw_ts_high;
 	s64 delta_ts;
 	s64 ts;
 	s64 tsample;
diff --git a/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c
index 734d735..c224427 100644
--- a/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c
+++ b/drivers/iio/imu/st_asm330lhh/st_asm330lhh_buffer.c
@@ -64,6 +64,8 @@
 	hw->ts_offset = hw->ts;
 	hw->hw_ts_old = 0ull;
 	hw->tsample = 0ull;
+	hw->hw_ts_high = 0ull;
+	hw->hw_val_old = 0ull;
 
 	return hw->tf->write(hw->dev, ST_ASM330LHH_REG_TS2_ADDR, sizeof(data),
 			     &data);
@@ -257,14 +259,12 @@
 	struct iio_dev *iio_dev;
 	__le16 fifo_status;
 	u16 fifo_depth;
-	u32 val;
 	int ts_processed = 0;
 	s64 hw_ts = 0ull, delta_hw_ts, cpu_timestamp;
 
 	ts_irq = hw->ts - hw->delta_ts;
 
-	do
-	{
+	do {
 		err = hw->tf->read(hw->dev, ST_ASM330LHH_REG_FIFO_DIFFL_ADDR,
 				   sizeof(fifo_status), (u8 *)&fifo_status);
 		if (err < 0)
@@ -289,8 +289,14 @@
 				tag = buf[i] >> 3;
 
 				if (tag == ST_ASM330LHH_TS_TAG) {
-					val = get_unaligned_le32(ptr);
-					hw->hw_ts = val * ST_ASM330LHH_TS_DELTA_NS;
+					hw->hw_val = get_unaligned_le32(ptr);
+
+					/* check for timer rollover */
+					if (hw->hw_val < hw->hw_val_old)
+						hw->hw_ts_high++;
+					hw->hw_ts =
+					(hw->hw_val + (hw->hw_ts_high << 32))
+						* ST_ASM330LHH_TS_DELTA_NS;
 					ts_delta_hw_ts = hw->hw_ts - hw->hw_ts_old;
 					hw_ts += ts_delta_hw_ts;
 					ts_delta_offs =
@@ -301,6 +307,7 @@
 
 					ts_irq += (hw->hw_ts + ts_delta_offs);
 					hw->hw_ts_old = hw->hw_ts;
+					hw->hw_val_old = hw->hw_val;
 					ts_processed++;
 
 					if (!hw->tsample)
diff --git a/drivers/input/sensors/smi130/smi130_acc.c b/drivers/input/sensors/smi130/smi130_acc.c
index a63e08e..a6416e4 100644
--- a/drivers/input/sensors/smi130/smi130_acc.c
+++ b/drivers/input/sensors/smi130/smi130_acc.c
@@ -133,7 +133,7 @@
 #include <linux/delay.h>
 #include <asm/irq.h>
 #include <linux/math64.h>
-
+#include <linux/cpu.h>
 #ifdef CONFIG_HAS_EARLYSUSPEND
 #include <linux/earlysuspend.h>
 #endif
@@ -159,7 +159,7 @@
 
 #define SENSOR_NAME                 "smi130_acc"
 #define SMI130_ACC_USE_BASIC_I2C_FUNC        1
-
+#define SMI130_HRTIMER 1
 #define MSC_TIME                6
 #define ABSMIN                      -512
 #define ABSMAX                      512
@@ -1573,7 +1573,6 @@
 	struct mutex enable_mutex;
 	struct mutex mode_mutex;
 	struct delayed_work work;
-	struct work_struct irq_work;
 #ifdef CONFIG_HAS_EARLYSUSPEND
 	struct early_suspend early_suspend;
 #endif
@@ -1611,8 +1610,57 @@
 	struct input_dev *accbuf_dev;
 	int report_evt_cnt;
 #endif
+#ifdef SMI130_HRTIMER
+	struct hrtimer smi130_hrtimer;
+#endif
 };
 
+#ifdef SMI130_HRTIMER
+static void smi130_set_cpu_idle_state(bool value)
+{
+	cpu_idle_poll_ctrl(value);
+}
+static enum hrtimer_restart smi130_timer_function(struct hrtimer *timer)
+{
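+	/* expiry means the irq is imminent; keep the cpu out of deep idle */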
+	smi130_set_cpu_idle_state(true);
+
+	return HRTIMER_NORESTART;
+}
+static void smi130_hrtimer_reset(struct smi130_acc_data *data)
+{
+	hrtimer_cancel(&data->smi130_hrtimer);
+	/* forward the hrtimer to expire ~1 ms before the next irq is due */
+	hrtimer_forward(&data->smi130_hrtimer, ktime_get(),
+			ns_to_ktime(data->time_odr - 1000000));
+	hrtimer_restart(&data->smi130_hrtimer);
+}
+static void smi130_hrtimer_init(struct smi130_acc_data *data)
+{
+	hrtimer_init(&data->smi130_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	data->smi130_hrtimer.function = smi130_timer_function;
+}
+static void smi130_hrtimer_cleanup(struct smi130_acc_data *data)
+{
+	hrtimer_cancel(&data->smi130_hrtimer);
+}
+#else
+static void smi130_set_cpu_idle_state(bool value)
+{
+}
+static void smi130_hrtimer_reset(struct smi130_acc_data *data)
+{
+}
+static void smi130_hrtimer_init(struct smi130_acc_data *data)
+{
+}
+static void smi130_hrtimer_cleanup(struct smi130_acc_data *data)
+{
+}
+#endif
+
 #ifdef CONFIG_HAS_EARLYSUSPEND
 static void smi130_acc_early_suspend(struct early_suspend *h);
 static void smi130_acc_late_resume(struct early_suspend *h);
@@ -3871,7 +3919,7 @@
 				signed char sensor_type, short *a_x)
 {
 	int comres = 0;
-	unsigned char data[2];
+	unsigned char data[2] = {0};
 
 	switch (sensor_type) {
 	case 0:
@@ -3940,7 +3988,7 @@
 				signed char sensor_type, short *a_y)
 {
 	int comres = 0;
-	unsigned char data[2];
+	unsigned char data[2] = {0};
 
 	switch (sensor_type) {
 	case 0:
@@ -3998,7 +4046,7 @@
 				signed char sensor_type, short *a_z)
 {
 	int comres = 0;
-	unsigned char data[2];
+	unsigned char data[2] = {0};
 
 	switch (sensor_type) {
 	case 0:
@@ -5087,7 +5135,7 @@
 		signed char sensor_type, struct smi130_accacc *acc)
 {
 	int comres = 0;
-	unsigned char data[6];
+	unsigned char data[6] = {0};
 	struct smi130_acc_data *client_data = i2c_get_clientdata(client);
 #ifndef SMI_ACC2X2_SENSOR_IDENTIFICATION_ENABLE
 	int bitwidth;
@@ -5381,7 +5429,7 @@
 	struct smi130_acc_data *smi130_acc = i2c_get_clientdata(client);
 
 	size_t count = 0;
-	u8 reg[0x40];
+	u8 reg[0x40] = {0};
 	int i;
 
 	for (i = 0; i < 0x40; i++) {
@@ -6962,7 +7010,7 @@
 	if (!client_data->smi_acc_cachepool) {
 		PERR("smi_acc_cachepool cache create failed\n");
 		err = -ENOMEM;
-		goto clean_exit1;
+		return 0;
 	}
 	for (i = 0; i < SMI_ACC_MAXSAMPLE; i++) {
 		client_data->smi130_acc_samplist[i] =
@@ -6970,7 +7018,7 @@
 					GFP_KERNEL);
 		if (!client_data->smi130_acc_samplist[i]) {
 			err = -ENOMEM;
-			goto clean_exit2;
+			goto clean_exit1;
 		}
 	}
 
@@ -6978,7 +7026,7 @@
 	if (!client_data->accbuf_dev) {
 		err = -ENOMEM;
 		PERR("input device allocation failed\n");
-		goto clean_exit3;
+		goto clean_exit1;
 	}
 	client_data->accbuf_dev->name = "smi130_accbuf";
 	client_data->accbuf_dev->id.bustype = BUS_I2C;
@@ -6999,25 +7047,26 @@
 	if (err) {
 		PERR("unable to register input device %s\n",
 				client_data->accbuf_dev->name);
-		goto clean_exit3;
+		goto clean_exit2;
 	}
 
 	client_data->acc_buffer_smi130_samples = true;
 	client_data->acc_enable = false;
 
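+	/* Force CPU idle polling so early sample IRQs are serviced promptly */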
+	smi130_set_cpu_idle_state(true);
+
 	smi130_acc_set_mode(client, SMI_ACC2X2_MODE_NORMAL, 1);
 	smi130_acc_set_bandwidth(client, SMI_ACC2X2_BW_62_50HZ);
 	smi130_acc_set_range(client, SMI_ACC2X2_RANGE_2G);
 
 	return 1;
 
-clean_exit3:
-	input_free_device(client_data->accbuf_dev);
 clean_exit2:
+	input_free_device(client_data->accbuf_dev);
+clean_exit1:
 	for (i = 0; i < SMI_ACC_MAXSAMPLE; i++)
 		kmem_cache_free(client_data->smi_acc_cachepool,
 				client_data->smi130_acc_samplist[i]);
-clean_exit1:
 	kmem_cache_destroy(client_data->smi_acc_cachepool);
 	return 0;
 }
@@ -7044,10 +7093,9 @@
 }
 #endif
 
-static void smi130_acc_irq_work_func(struct work_struct *work)
+static irqreturn_t smi130_acc_irq_work_func(int irq, void *handle)
 {
-	struct smi130_acc_data *smi130_acc = container_of((struct work_struct *)work,
-			struct smi130_acc_data, irq_work);
+	struct smi130_acc_data *smi130_acc = handle;
 #ifdef CONFIG_DOUBLE_TAP
 	struct i2c_client *client = smi130_acc->smi130_acc_client;
 #endif
@@ -7092,8 +7140,10 @@
 		mutex_unlock(&smi130_acc->value_mutex);
 	}
 	store_acc_boot_sample(smi130_acc, acc.x, acc.y, acc.z, ts);
-#endif
 
+	smi130_set_cpu_idle_state(false);
+	return IRQ_HANDLED;
+#endif
 	smi130_acc_get_interruptstatus1(smi130_acc->smi130_acc_client, &status);
 	PDEBUG("smi130_acc_irq_work_func, status = 0x%x\n", status);
 
@@ -7246,10 +7296,9 @@
 	if (data->smi130_acc_client == NULL)
 		return IRQ_HANDLED;
 	data->timestamp = smi130_acc_get_alarm_timestamp();
+	smi130_hrtimer_reset(data);
 
-	schedule_work(&data->irq_work);
-
-	return IRQ_HANDLED;
+	return IRQ_WAKE_THREAD;
 }
 #endif /* defined(SMI_ACC2X2_ENABLE_INT1)||defined(SMI_ACC2X2_ENABLE_INT2) */
 
@@ -7363,7 +7412,6 @@
 	if (err)
 		PERR("could not request irq\n");
 
-	INIT_WORK(&data->irq_work, smi130_acc_irq_work_func);
 #endif
 
 #ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
@@ -7558,9 +7606,11 @@
 		return -EINVAL;
 	data->IRQ = client->irq;
 	PDEBUG("data->IRQ = %d", data->IRQ);
-	err = request_irq(data->IRQ, smi130_acc_irq_handler, IRQF_TRIGGER_RISING,
+	err = request_threaded_irq(data->IRQ, smi130_acc_irq_handler,
+			smi130_acc_irq_work_func, IRQF_TRIGGER_RISING,
 			"smi130_acc", data);
 
+	smi130_hrtimer_init(data);
 	err = smi130_acc_early_buff_init(client, data);
 	if (!err)
 		goto exit;
@@ -7676,6 +7726,7 @@
 	if (NULL == data)
 		return 0;
 
+	smi130_hrtimer_cleanup(data);
 	smi130_acc_input_cleanup(data);
 	smi130_acc_set_enable(&client->dev, 0);
 #ifdef CONFIG_HAS_EARLYSUSPEND
diff --git a/drivers/input/sensors/smi130/smi130_driver.c b/drivers/input/sensors/smi130/smi130_driver.c
index 42a0a57..3991ada 100644
--- a/drivers/input/sensors/smi130/smi130_driver.c
+++ b/drivers/input/sensors/smi130/smi130_driver.c
@@ -2879,7 +2879,7 @@
 	struct smi_client_data *client_data = input_get_drvdata(input);
 
 	ssize_t ret;
-	u8 reg_data[128], i;
+	u8 reg_data[128] = {0}, i;
 	int pos;
 
 	if (client_data == NULL) {
@@ -2913,7 +2913,8 @@
 	struct input_dev *input = to_input_dev(dev);
 	struct smi_client_data *client_data = input_get_drvdata(input);
 	ssize_t ret;
-	u8 reg_data[32];
+	u8 reg_data[32] = {0};
 	int i, j, status, digit;
 
 	if (client_data == NULL) {
diff --git a/drivers/input/sensors/smi130/smi130_gyro_driver.c b/drivers/input/sensors/smi130/smi130_gyro_driver.c
index fd9e87d..e497a0a8 100644
--- a/drivers/input/sensors/smi130/smi130_gyro_driver.c
+++ b/drivers/input/sensors/smi130/smi130_gyro_driver.c
@@ -290,7 +290,6 @@
 	ktime_t work_delay_kt;
 	uint8_t gpio_pin;
 	int16_t IRQ;
-	struct work_struct irq_work;
 #ifdef CONFIG_ENABLE_SMI_ACC_GYRO_BUFFERING
 	bool read_gyro_boot_sample;
 	int gyro_bufsample_cnt;
@@ -422,7 +421,7 @@
 static void smi_gyro_dump_reg(struct i2c_client *client)
 {
 	int i;
-	u8 dbg_buf[64];
+	u8 dbg_buf[64] = {0};
 	u8 dbg_buf_str[64 * 3 + 1] = "";
 
 	for (i = 0; i < BYTES_PER_LINE; i++) {
@@ -1788,7 +1787,7 @@
 	if (!client_data->smi_gyro_cachepool) {
 		PERR("smi_gyro_cachepool cache create failed\n");
 		err = -ENOMEM;
-		goto clean_exit1;
+		return 0;
 	}
 
 	for (i = 0; i < SMI_GYRO_MAXSAMPLE; i++) {
@@ -1797,7 +1796,7 @@
 					GFP_KERNEL);
 		if (!client_data->smi130_gyro_samplist[i]) {
 			err = -ENOMEM;
-			goto clean_exit2;
+			goto clean_exit1;
 		}
 	}
 
@@ -1806,7 +1805,7 @@
 	if (!client_data->gyrobuf_dev) {
 		err = -ENOMEM;
 		PERR("input device allocation failed\n");
-		goto clean_exit3;
+		goto clean_exit1;
 	}
 	client_data->gyrobuf_dev->name = "smi130_gyrobuf";
 	client_data->gyrobuf_dev->id.bustype = BUS_I2C;
@@ -1827,7 +1826,7 @@
 	if (err) {
 		PERR("unable to register input device %s\n",
 				client_data->gyrobuf_dev->name);
-		goto clean_exit3;
+		goto clean_exit2;
 	}
 
 	client_data->gyro_buffer_smi130_samples = true;
@@ -1852,13 +1851,12 @@
 
 	return 1;
 
-clean_exit3:
-	input_free_device(client_data->gyrobuf_dev);
 clean_exit2:
+	input_free_device(client_data->gyrobuf_dev);
+clean_exit1:
 	for (i = 0; i < SMI_GYRO_MAXSAMPLE; i++)
 		kmem_cache_free(client_data->smi_gyro_cachepool,
 				client_data->smi130_gyro_samplist[i]);
-clean_exit1:
 	kmem_cache_destroy(client_data->smi_gyro_cachepool);
 	return 0;
 }
@@ -1895,10 +1893,9 @@
 
 
 #if defined(SMI130_GYRO_ENABLE_INT1) || defined(SMI130_GYRO_ENABLE_INT2)
-static void smi130_gyro_irq_work_func(struct work_struct *work)
+static irqreturn_t smi130_gyro_irq_work_func(int irq, void *handle)
 {
-	struct smi_gyro_client_data *client_data = container_of(work,
-		struct smi_gyro_client_data, irq_work);
+	struct smi_gyro_client_data *client_data = handle;
 	struct smi130_gyro_data_t gyro_data;
 	struct timespec ts;
 	ts = ns_to_timespec(client_data->timestamp);
@@ -1919,14 +1916,14 @@
 	input_sync(client_data->input);
 	store_gyro_boot_sample(client_data, gyro_data.datax,
 			gyro_data.datay, gyro_data.dataz, ts);
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t smi_gyro_irq_handler(int irq, void *handle)
 {
 	struct smi_gyro_client_data *client_data = handle;
 	client_data->timestamp= smi130_gyro_get_alarm_timestamp();
-	schedule_work(&client_data->irq_work);
-	return IRQ_HANDLED;
+	return IRQ_WAKE_THREAD;
 }
 #endif
 static int smi_gyro_probe(struct i2c_client *client, const struct i2c_device_id *id)
@@ -2098,13 +2095,12 @@
 			PDEBUG("request failed\n");
 		}
 		client_data->IRQ = gpio_to_irq(client_data->gpio_pin);
-		err = request_irq(client_data->IRQ, smi_gyro_irq_handler,
-				IRQF_TRIGGER_RISING,
-				SENSOR_NAME, client_data);
+		err = request_threaded_irq(client_data->IRQ,
+				smi_gyro_irq_handler, smi130_gyro_irq_work_func,
+				IRQF_TRIGGER_RISING, SENSOR_NAME, client_data);
 		if (err < 0)
 			PDEBUG("request handle failed\n");
 	}
-	INIT_WORK(&client_data->irq_work, smi130_gyro_irq_work_func);
 #endif
 
 	err = smi130_gyro_early_buff_init(client_data);
diff --git a/drivers/iommu/msm_dma_iommu_mapping.c b/drivers/iommu/msm_dma_iommu_mapping.c
index 180edf3..05ed311 100644
--- a/drivers/iommu/msm_dma_iommu_mapping.c
+++ b/drivers/iommu/msm_dma_iommu_mapping.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -41,7 +41,7 @@
 	struct list_head lnode;
 	struct rb_node node;
 	struct device *dev;
-	struct scatterlist sgl;
+	struct scatterlist *sgl;
 	unsigned int nents;
 	enum dma_data_direction dir;
 	struct msm_iommu_meta *meta;
@@ -148,6 +148,22 @@
 
 static void msm_iommu_meta_put(struct msm_iommu_meta *meta);
 
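+/*
+ * Duplicate the caller's scatterlist entries into a freshly allocated table
+ * so the saved mapping can be unmapped later, even after the caller has
+ * released its own sg list; the clone is freed via sg_free_table() when the
+ * mapping is released.
+ */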
+static struct scatterlist *clone_sgl(struct scatterlist *sg, int nents)
+{
+	struct scatterlist *next, *s;
+	int i;
+	struct sg_table table;
+
+	if (sg_alloc_table(&table, nents, GFP_KERNEL))
+		return NULL;
+	next = table.sgl;
+	for_each_sg(sg, s, nents, i) {
+		*next = *s;
+		next = sg_next(next);
+	}
+	return table.sgl;
+}
+
 static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
 				   int nents, enum dma_data_direction dir,
 				   struct dma_buf *dma_buf,
@@ -191,20 +207,25 @@
 		}
 
 		ret = dma_map_sg_attrs(dev, sg, nents, dir, attrs);
-		if (ret != nents) {
+		if (!ret) {
 			kfree(iommu_map);
 			goto out_unlock;
 		}
 
+		iommu_map->sgl = clone_sgl(sg, nents);
+		if (!iommu_map->sgl) {
+			kfree(iommu_map);
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+		iommu_map->nents = nents;
+		iommu_map->dev = dev;
+
 		kref_init(&iommu_map->ref);
 		if (late_unmap)
 			kref_get(&iommu_map->ref);
 		iommu_map->meta = iommu_meta;
-		iommu_map->sgl.dma_address = sg->dma_address;
-		iommu_map->sgl.dma_length = sg->dma_length;
-		iommu_map->dev = dev;
 		iommu_map->dir = dir;
-		iommu_map->nents = nents;
 		iommu_map->map_attrs = attrs;
 		iommu_map->buf_start_addr = sg_phys(sg);
 		msm_iommu_add(iommu_meta, iommu_map);
@@ -214,8 +235,8 @@
 		    dir == iommu_map->dir &&
 		    attrs == iommu_map->map_attrs &&
 		    sg_phys(sg) == iommu_map->buf_start_addr) {
-			sg->dma_address = iommu_map->sgl.dma_address;
-			sg->dma_length = iommu_map->sgl.dma_length;
+			sg->dma_address = iommu_map->sgl->dma_address;
+			sg->dma_length = iommu_map->sgl->dma_length;
 
 			kref_get(&iommu_map->ref);
 			if (is_device_dma_coherent(dev))
@@ -315,9 +336,14 @@
 {
 	struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map,
 						ref);
+	struct sg_table table;
 
+	table.nents = table.orig_nents = map->nents;
+	table.sgl = map->sgl;
 	list_del(&map->lnode);
-	dma_unmap_sg(map->dev, &map->sgl, map->nents, map->dir);
+
+	dma_unmap_sg(map->dev, map->sgl, map->nents, map->dir);
+	sg_free_table(&table);
 	kfree(map);
 }
 
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c b/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
index 2eb00eb..834b535 100644
--- a/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
+++ b/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -208,7 +208,7 @@
 {
 	int rc = 0;
 
-	if (!pdev || !&pdev->dev || !clk_info || !clk_ptr || !num_clk)
+	if (!pdev || (&pdev->dev == NULL) || !clk_info || !clk_ptr || !num_clk)
 		return -EINVAL;
 
 	rc = msm_camera_get_clk_info_internal(&pdev->dev,
@@ -513,7 +513,7 @@
 {
 	int rc = 0;
 
-	if (!pdev || !&pdev->dev || !clk_info || !clk_ptr)
+	if (!pdev || (&pdev->dev == NULL) || !clk_info || !clk_ptr)
 		return -EINVAL;
 
 	rc = msm_camera_put_clk_info_internal(&pdev->dev,
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index bcd9274..80b250d 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -636,7 +636,7 @@
 {
 	struct msm_command_ack *cmd_ack = d1;
 
-	if (!(&cmd_ack->command_q))
+	if (&cmd_ack->command_q == NULL)
 		return 0;
 
 	msm_queue_drain(&cmd_ack->command_q, struct msm_command, list);
@@ -646,7 +646,7 @@
 
 static void msm_remove_session_cmd_ack_q(struct msm_session *session)
 {
-	if ((!session) || !(&session->command_ack_q))
+	if ((!session) || (&session->command_ack_q == NULL))
 		return;
 
 	mutex_lock(&session->lock);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
index 89a5ea0..5f2cce1 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
@@ -389,7 +389,8 @@
 {
 	int32_t rc = 0;
 
-	if (!(&flash_ctrl->power_info) || !(&flash_ctrl->flash_i2c_client)) {
+	if ((&flash_ctrl->power_info == NULL) ||
+		(&flash_ctrl->flash_i2c_client == NULL)) {
 		pr_err("%s:%d failed: %pK %pK\n",
 			__func__, __LINE__, &flash_ctrl->power_info,
 			&flash_ctrl->flash_i2c_client);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
index 9e9ef92..a33436e 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1350,7 +1350,7 @@
 	CDBG("%s: %d GPIO offset: %d, seq_val: %d\n", __func__, __LINE__,
 		gpio_offset, seq_val);
 
-	if ((gconf->gpio_num_info->valid[gpio_offset] == 1)) {
+	if (gconf->gpio_num_info->valid[gpio_offset] == 1) {
 		gpio_set_value_cansleep(
 			gconf->gpio_num_info->gpio_num
 			[gpio_offset], val);
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index d8d7985..4b51846 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1850,34 +1850,6 @@
 	return rc;
 }
 
-static int __get_q_size(struct venus_hfi_device *dev, unsigned int q_index)
-{
-	struct hfi_queue_header *queue;
-	struct vidc_iface_q_info *q_info;
-	u32 write_ptr, read_ptr;
-
-	if (q_index >= VIDC_IFACEQ_NUMQ) {
-		dprintk(VIDC_ERR, "Invalid q index: %d\n", q_index);
-		return -ENOENT;
-	}
-
-	q_info = &dev->iface_queues[q_index];
-	if (!q_info) {
-		dprintk(VIDC_ERR, "cannot read shared Q's\n");
-		return -ENOENT;
-	}
-
-	queue = (struct hfi_queue_header *)q_info->q_hdr;
-	if (!queue) {
-		dprintk(VIDC_ERR, "queue not present\n");
-		return -ENOENT;
-	}
-
-	write_ptr = (u32)queue->qhdr_write_idx;
-	read_ptr = (u32)queue->qhdr_read_idx;
-	return read_ptr - write_ptr;
-}
-
 static void __core_clear_interrupt(struct venus_hfi_device *device)
 {
 	u32 intr_status = 0;
@@ -3206,8 +3178,7 @@
 			*session_id = session->session_id;
 		}
 
-		if (packet_count >= max_packets &&
-				__get_q_size(device, VIDC_IFACEQ_MSGQ_IDX)) {
+		if (packet_count >= max_packets) {
 			dprintk(VIDC_WARN,
 					"Too many packets in message queue to handle at once, deferring read\n");
 			break;
diff --git a/drivers/media/platform/msm/vidc_3x/venus_hfi.c b/drivers/media/platform/msm/vidc_3x/venus_hfi.c
index 2d0e353..550e060 100644
--- a/drivers/media/platform/msm/vidc_3x/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc_3x/venus_hfi.c
@@ -2289,34 +2289,6 @@
 	return rc;
 }
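+	/* Also report the DCI OUT channel (channel 20) state to userspace */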
 
-static int __get_q_size(struct venus_hfi_device *dev, unsigned int q_index)
-{
-	struct hfi_queue_header *queue;
-	struct vidc_iface_q_info *q_info;
-	u32 write_ptr, read_ptr;
-
-	if (q_index >= VIDC_IFACEQ_NUMQ) {
-		dprintk(VIDC_ERR, "Invalid q index: %d\n", q_index);
-		return -ENOENT;
-	}
-
-	q_info = &dev->iface_queues[q_index];
-	if (!q_info) {
-		dprintk(VIDC_ERR, "cannot read shared Q's\n");
-		return -ENOENT;
-	}
-
-	queue = (struct hfi_queue_header *)q_info->q_hdr;
-	if (!queue) {
-		dprintk(VIDC_ERR, "queue not present\n");
-		return -ENOENT;
-	}
-
-	write_ptr = (u32)queue->qhdr_write_idx;
-	read_ptr = (u32)queue->qhdr_read_idx;
-	return read_ptr - write_ptr;
-}
-
 static void __core_clear_interrupt(struct venus_hfi_device *device)
 {
 	u32 intr_status = 0;
@@ -3605,8 +3577,7 @@
 			*session_id = session->session_id;
 		}
 
-		if (packet_count >= max_packets &&
-				__get_q_size(device, VIDC_IFACEQ_MSGQ_IDX)) {
+		if (packet_count >= max_packets) {
 			dprintk(VIDC_WARN,
 					"Too many packets in message queue to handle at once, deferring read\n");
 			break;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 67d8e33..e238cb4 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1814,6 +1814,16 @@
 			if (ptr_svc->svc.listener_id == lstnr) {
 				ptr_svc->listener_in_use = true;
 				ptr_svc->rcv_req_flag = 1;
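+				/*
+				 * Invalidate the listener's shared buffer
+				 * caches before waking it so it reads fresh
+				 * request data.
+				 */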
+				rc = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ptr_svc->ihandle,
+					ptr_svc->sb_virt,
+					ptr_svc->sb_length,
+					ION_IOC_INV_CACHES);
+				if (rc) {
+					pr_err("cache operation failed %d\n", rc);
+					status = QSEOS_RESULT_FAILURE;
+					goto err_resp;
+				}
 				wake_up_interruptible(&ptr_svc->rcv_req_wq);
 				break;
 			}
@@ -2142,6 +2152,16 @@
 			if (ptr_svc->svc.listener_id == lstnr) {
 				ptr_svc->listener_in_use = true;
 				ptr_svc->rcv_req_flag = 1;
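+				/*
+				 * Invalidate the listener's shared buffer
+				 * caches before waking it so it reads fresh
+				 * request data.
+				 */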
+				rc = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ptr_svc->ihandle,
+					ptr_svc->sb_virt,
+					ptr_svc->sb_length,
+					ION_IOC_INV_CACHES);
+				if (rc) {
+					pr_err("cache operation failed %d\n", rc);
+					status = QSEOS_RESULT_FAILURE;
+					goto err_resp;
+				}
 				wake_up_interruptible(&ptr_svc->rcv_req_wq);
 				break;
 			}
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 93ceea4..ab0120c 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -834,9 +834,21 @@
 	int err = 0;
 	struct ubi_ainf_peb *aeb, *tmp_aeb;
 
-	if (!list_empty(&ai->free)) {
-		aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
+	list_for_each_entry_safe(aeb, tmp_aeb, &ai->free, u.list) {
 		list_del(&aeb->u.list);
+		if (aeb->ec == UBI_UNKNOWN) {
+			ubi_err(ubi, "PEB %d in freelist has unknown EC",
+							aeb->pnum);
+			aeb->ec = ai->mean_ec;
+		}
+		err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec + 1);
+		if (err) {
+			ubi_err(ubi, "Erase failed for PEB %d in freelist",
+							aeb->pnum);
+			list_add(&aeb->u.list, &ai->erase);
+			continue;
+		}
+		aeb->ec += 1;
 		dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
 		return aeb;
 	}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 541c179..8d28b22 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -125,6 +125,9 @@
 
 static ssize_t dev_attribute_show(struct device *dev,
 				  struct device_attribute *attr, char *buf);
+static ssize_t dev_attribute_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count);
 
 /* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
 static struct device_attribute dev_eraseblock_size =
@@ -151,6 +154,13 @@
 	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
 static struct device_attribute dev_ro_mode =
 	__ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_trigger_scrub =
+	__ATTR(scrub_all, 0644,
+		dev_attribute_show, dev_attribute_store);
+static struct device_attribute dev_mtd_max_scrub_sqnum =
+	__ATTR(scrub_max_sqnum, 0444, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_min_scrub_sqnum =
+	__ATTR(scrub_min_sqnum, 0444, dev_attribute_show, NULL);
 
 /**
  * ubi_volume_notify - send a volume change notification.
@@ -343,6 +353,17 @@
 	return ubi_num;
 }
 
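+/* Snapshot the highest sequence number issued so far, under ltree_lock */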
+static unsigned long long get_max_sqnum(struct ubi_device *ubi)
+{
+	unsigned long long max_sqnum;
+
+	spin_lock(&ubi->ltree_lock);
+	max_sqnum = ubi->global_sqnum - 1;
+	spin_unlock(&ubi->ltree_lock);
+
+	return max_sqnum;
+}
+
 /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
 static ssize_t dev_attribute_show(struct device *dev,
 				  struct device_attribute *attr, char *buf)
@@ -389,6 +410,15 @@
 		ret = sprintf(buf, "%d\n", ubi->mtd->index);
 	else if (attr == &dev_ro_mode)
 		ret = sprintf(buf, "%d\n", ubi->ro_mode);
+	else if (attr == &dev_mtd_trigger_scrub)
+		ret = snprintf(buf, PAGE_SIZE, "%d\n",
+					atomic_read(&ubi->scrub_work_count));
+	else if (attr == &dev_mtd_max_scrub_sqnum)
+		ret = snprintf(buf, PAGE_SIZE, "%llu\n",
+					get_max_sqnum(ubi));
+	else if (attr == &dev_mtd_min_scrub_sqnum)
+		ret = snprintf(buf, PAGE_SIZE, "%llu\n",
+					ubi_wl_scrub_get_min_sqnum(ubi));
 	else
 		ret = -EINVAL;
 
@@ -409,10 +439,45 @@
 	&dev_bgt_enabled.attr,
 	&dev_mtd_num.attr,
 	&dev_ro_mode.attr,
+	&dev_mtd_trigger_scrub.attr,
+	&dev_mtd_max_scrub_sqnum.attr,
+	&dev_mtd_min_scrub_sqnum.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(ubi_dev);
 
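+/*
+ * "Store" method for writable files in '/<sysfs>/class/ubi/ubiX/'. Only
+ * scrub_all accepts writes: the value is interpreted as a sequence number,
+ * and every in-use PEB whose VID header sqnum is at or below it is
+ * scheduled for scrubbing.
+ */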
+static ssize_t dev_attribute_store(struct device *dev,
+			   struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	int ret = count;
+	struct ubi_device *ubi;
+	unsigned long long scrub_sqnum;
+
+	ubi = container_of(dev, struct ubi_device, dev);
+	ubi = ubi_get_device(ubi->ubi_num);
+	if (!ubi)
+		return -ENODEV;
+
+	if (attr == &dev_mtd_trigger_scrub) {
+		if (kstrtoull(buf, 10, &scrub_sqnum)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (!ubi->lookuptbl) {
+			pr_err("lookuptbl is null");
+			ret = -ENOENT;
+			goto out;
+		}
+		ret = ubi_wl_scrub_all(ubi, scrub_sqnum);
+		if (ret == 0)
+			ret = count;
+	}
+
+out:
+	ubi_put_device(ubi);
+	return ret;
+}
+
 static void dev_release(struct device *dev)
 {
 	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index b6fb8f9..f036165 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1105,6 +1105,16 @@
 	dbg_io("write VID header to PEB %d", pnum);
 	ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count);
 
+	/*
+	 * Re-erase the PEB before using it. This should minimize any issues
+	 * from decay of charge in this block.
+	 */
+	if (ubi->wl_is_inited) {
+		err = ubi_wl_re_erase_peb(ubi, pnum);
+		if (err)
+			return err;
+	}
+
 	err = self_check_peb_ec_hdr(ubi, pnum);
 	if (err)
 		return err;
@@ -1123,6 +1133,8 @@
 
 	err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
 			   ubi->vid_hdr_alsize);
+	if (!err && ubi->wl_is_inited)
+		ubi_wl_update_peb_sqnum(ubi, pnum, vid_hdr);
 	return err;
 }
 
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 697dbcb..13efd64 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -183,6 +183,8 @@
  * @u.list: link in the protection queue
  * @ec: erase counter
  * @pnum: physical eraseblock number
+ * @tagged_scrub_all: set if the entry has been tagged by a scrub-all request
+ * @sqnum: The sequence number of the vol header.
  *
  * This data structure is used in the WL sub-system. Each physical eraseblock
  * has a corresponding &struct wl_entry object which may be kept in different
@@ -195,6 +197,8 @@
 	} u;
 	int ec;
 	int pnum;
+	unsigned int tagged_scrub_all:1;
+	unsigned long long sqnum;
 };
 
 /**
@@ -624,6 +628,9 @@
 	struct task_struct *bgt_thread;
 	int thread_enabled;
 	char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
+	bool scrub_in_progress;
+	atomic_t scrub_work_count;
+	int wl_is_inited;
 
 	/* I/O sub-system's stuff */
 	long long flash_size;
@@ -919,6 +926,12 @@
 int ubi_is_erase_work(struct ubi_work *wrk);
 void ubi_refill_pools(struct ubi_device *ubi);
 int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
+ssize_t ubi_wl_scrub_all(struct ubi_device *ubi,
+			 unsigned long long scrub_sqnum);
+void ubi_wl_update_peb_sqnum(struct ubi_device *ubi, int pnum,
+				struct ubi_vid_hdr *vid_hdr);
+unsigned long long ubi_wl_scrub_get_min_sqnum(struct ubi_device *ubi);
+int ubi_wl_re_erase_peb(struct ubi_device *ubi, int pnum);
 
 /* io.c */
 int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index a8f74d9..5c0332e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -102,6 +102,7 @@
 #include <linux/crc32.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
+#include <linux/delay.h>
 #include "ubi.h"
 #include "wl.h"
 
@@ -487,6 +488,42 @@
 	return err;
 }
 
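+/**
+ * ubi_wl_re_erase_peb - erase a PEB again right before it is reused.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number
+ *
+ * Erase the PEB once more and re-write its EC header, without incrementing
+ * the erase counter, to refresh the charge stored in the block. Returns
+ * zero in case of success and a negative error code in case of failure.
+ */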
+int ubi_wl_re_erase_peb(struct ubi_device *ubi, int pnum)
+{
+	int err;
+	struct ubi_wl_entry *e;
+	struct ubi_ec_hdr *ec_hdr;
+
+	spin_lock(&ubi->wl_lock);
+	e = ubi->lookuptbl[pnum];
+	spin_unlock(&ubi->wl_lock);
+
+	dbg_wl("Re-erase PEB %d, EC %u", e->pnum, e->ec);
+
+	err = self_check_ec(ubi, e->pnum, e->ec);
+	if (err)
+		return -EINVAL;
+
+	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+	if (!ec_hdr)
+		return -ENOMEM;
+
+	err = ubi_io_sync_erase(ubi, e->pnum, 0);
+	if (err < 0)
+		goto out_free;
+
+	dbg_wl("re-erased PEB %d, EC %u", e->pnum, e->ec);
+
+	ec_hdr->ec = cpu_to_be64(e->ec);
+
+	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
+
+out_free:
+	kfree(ec_hdr);
+	return err;
+}
+
 /**
  * serve_prot_queue - check if it is time to stop protecting PEBs.
  * @ubi: UBI device description object
@@ -862,9 +899,20 @@
 	}
 
 	/* The PEB has been successfully moved */
-	if (scrubbing)
-		ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
-			e1->pnum, vol_id, lnum, e2->pnum);
+	if (scrubbing) {
+		spin_lock(&ubi->wl_lock);
+		if (e1->tagged_scrub_all) {
+			WARN_ON(atomic_read(&ubi->scrub_work_count) <= 0);
+			atomic_dec(&ubi->scrub_work_count);
+			e1->tagged_scrub_all = 0;
+			e2->tagged_scrub_all = 0;
+		} else {
+			ubi_msg(ubi,
+				"scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
+				e1->pnum, vol_id, lnum, e2->pnum);
+		}
+		spin_unlock(&ubi->wl_lock);
+	}
 	ubi_free_vid_buf(vidb);
 
 	spin_lock(&ubi->wl_lock);
@@ -1226,6 +1274,7 @@
 retry:
 	spin_lock(&ubi->wl_lock);
 	e = ubi->lookuptbl[pnum];
+	e->sqnum = UBI_UNKNOWN;
 	if (e == ubi->move_from) {
 		/*
 		 * User is putting the physical eraseblock which was selected to
@@ -1262,6 +1311,20 @@
 		} else if (in_wl_tree(e, &ubi->scrub)) {
 			self_check_in_wl_tree(ubi, e, &ubi->scrub);
 			rb_erase(&e->u.rb, &ubi->scrub);
+
+			/*
+			 * Since this PEB has been put we dont need to worry
+			 * about it anymore
+			 */
+			if (e->tagged_scrub_all) {
+				int wrk_count;
+
+				wrk_count = atomic_read(&ubi->scrub_work_count);
+				WARN_ON(wrk_count <= 0);
+
+				atomic_dec(&ubi->scrub_work_count);
+				e->tagged_scrub_all = 0;
+			}
 		} else if (in_wl_tree(e, &ubi->erroneous)) {
 			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
 			rb_erase(&e->u.rb, &ubi->erroneous);
@@ -1294,6 +1357,197 @@
 }
 
 /**
+ * ubi_wl_scrub_get_min_sqnum - Return the minimum sqnum of the used/pq/scrub.
+ * @ubi: UBI device description object
+ *
+ * This function returns the minimum sqnum of the PEBs that are currently in
+ * use.
+ *
+ * Return the min sqnum if there are any used PEBs, otherwise return ~0.
+ *
+ */
+unsigned long long ubi_wl_scrub_get_min_sqnum(struct ubi_device *ubi)
+{
+	int i;
+	struct ubi_wl_entry *e, *tmp;
+	struct rb_node *node;
+	unsigned long long min_sqnum = ~0;
+
+	spin_lock(&ubi->wl_lock);
+
+	/* Go through the pq list */
+	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+			if (e->sqnum < min_sqnum)
+				min_sqnum = e->sqnum;
+		}
+	}
+
+	/* Go through used PEB tree */
+	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
+		e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		self_check_in_wl_tree(ubi, e, &ubi->used);
+		if (e->sqnum < min_sqnum)
+			min_sqnum = e->sqnum;
+	}
+	/* Go through scrub PEB tree */
+	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
+		e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		self_check_in_wl_tree(ubi, e, &ubi->scrub);
+		if (e->sqnum < min_sqnum)
+			min_sqnum = e->sqnum;
+	}
+	spin_unlock(&ubi->wl_lock);
+	return min_sqnum;
+}
+
+/**
+ * ubi_wl_update_peb_sqnum - Update the vol hdr sqnum of the PEB.
+ * @ubi: UBI device description object
+ * @pnum: The PEB number.
+ * @vid_hdr: The vol hdr being written to the PEB.
+ */
+void ubi_wl_update_peb_sqnum(struct ubi_device *ubi, int pnum,
+				struct ubi_vid_hdr *vid_hdr)
+{
+	struct ubi_wl_entry *e;
+
+	spin_lock(&ubi->wl_lock);
+	e = ubi->lookuptbl[pnum];
+	e->sqnum = be64_to_cpu(vid_hdr->sqnum);
+	e->tagged_scrub_all = 0;
+	spin_unlock(&ubi->wl_lock);
+}
+
+static int is_ubi_readonly(struct ubi_device *ubi)
+{
+	int is_readonly = 0;
+
+	spin_lock(&ubi->wl_lock);
+	if (ubi->ro_mode || !ubi->thread_enabled ||
+	    ubi_dbg_is_bgt_disabled(ubi))
+		is_readonly = 1;
+	spin_unlock(&ubi->wl_lock);
+
+	return is_readonly;
+}
+
+/**
+ * ubi_wl_scrub_all - Schedule all in-use PEBs for scrubbing.
+ * @ubi: UBI device description object
+ * @scrub_sqnum: The max sqnum of the PEBs to scrub from the used/pq lists
+ *
+ * This function schedules every in-use PEB for scrubbing if the sqnum of its
+ * vol hdr is at or below the trigger sqnum.
+ *
+ * Return 0 in case of success, (negative) error code otherwise.
+ */
+ssize_t ubi_wl_scrub_all(struct ubi_device *ubi, unsigned long long scrub_sqnum)
+{
+	struct rb_node *node;
+	struct ubi_wl_entry *e, *tmp;
+	int scrub_count = 0;
+	int total_scrub_count = 0;
+	int err, i;
+
+	if (!ubi->lookuptbl) {
+		ubi_err(ubi, "lookuptbl is null");
+		return -ENOENT;
+	}
+
+	if (is_ubi_readonly(ubi)) {
+		ubi_err(ubi, "Cannot *Initiate* scrub: background thread disabled or readonly!");
+		return -EROFS;
+	}
+
+	/* Wait for all currently running work to be done! */
+	down_write(&ubi->work_sem);
+	spin_lock(&ubi->wl_lock);
+	ubi_msg(ubi, "Scrub triggered sqnum = %llu!", scrub_sqnum);
+
+	if (ubi->scrub_in_progress) {
+		ubi_err(ubi, "Scrub already in progress, ignoring the trigger");
+		spin_unlock(&ubi->wl_lock);
+		up_write(&ubi->work_sem); /* Allow new work to start. */
+		return -EBUSY;
+	}
+	ubi->scrub_in_progress = true;
+
+	/* Go through scrub PEB tree and count pending */
+	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
+		e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		self_check_in_wl_tree(ubi, e, &ubi->scrub);
+		e->tagged_scrub_all = 1;
+		total_scrub_count++;
+	}
+
+	/* Move all used pebs to scrub tree */
+	node = rb_first(&ubi->used);
+	while (node != NULL) {
+		e = rb_entry(node, struct ubi_wl_entry, u.rb);
+		self_check_in_wl_tree(ubi, e, &ubi->used);
+		node = rb_next(node);
+
+		if (e->sqnum > scrub_sqnum)
+			continue;
+		rb_erase(&e->u.rb, &ubi->used);
+		wl_tree_add(e, &ubi->scrub);
+		e->tagged_scrub_all = 1;
+		scrub_count++;
+		total_scrub_count++;
+	}
+
+	/* Move all protected pebs to scrub tree */
+	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+
+			if (e->sqnum > scrub_sqnum)
+				continue;
+
+			list_del(&e->u.list);
+			wl_tree_add(e, &ubi->scrub);
+			e->tagged_scrub_all = 1;
+			scrub_count++;
+			total_scrub_count++;
+		}
+	}
+
+	atomic_set(&ubi->scrub_work_count, total_scrub_count);
+	spin_unlock(&ubi->wl_lock);
+	up_write(&ubi->work_sem); /* Allow new work to start. */
+
+	/*
+	 * Technically scrubbing is the same as wear-leveling, so it is done
+	 * by the WL worker.
+	 */
+	err = ensure_wear_leveling(ubi, 0);
+	if (err) {
+		ubi_err(ubi, "Failed to start the WL worker, err = %d", err);
+		return err;
+	}
+	ubi_msg(ubi, "Scheduled %d PEBs for scrubbing!", scrub_count);
+	ubi_msg(ubi, "Total PEBs for scrub = %d", total_scrub_count);
+
+	/* Wait for scrub to finish */
+	while (atomic_read(&ubi->scrub_work_count) > 0) {
+		/* Poll every second to check if the scrub work is done */
+		msleep(1000);
+
+		if (is_ubi_readonly(ubi)) {
+			ubi_err(ubi, "Cannot *Complete* scrub: background thread disabled or readonly!");
+			return -EROFS;
+		}
+		wake_up_process(ubi->bgt_thread);
+	}
+
+	spin_lock(&ubi->wl_lock);
+	ubi->scrub_in_progress = false;
+	spin_unlock(&ubi->wl_lock);
+	ubi_msg(ubi, "Done scrubbing %d PEBs!", scrub_count);
+	return 0;
+}
+
+/**
  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock to schedule
@@ -1622,6 +1876,8 @@
 
 		e->pnum = aeb->pnum;
 		e->ec = aeb->ec;
+		e->tagged_scrub_all = 0;
+		e->sqnum = aeb->sqnum;
 		ubi_assert(e->ec >= 0);
 
 		wl_tree_add(e, &ubi->free);
@@ -1644,6 +1900,8 @@
 
 			e->pnum = aeb->pnum;
 			e->ec = aeb->ec;
+			e->tagged_scrub_all = 0;
+			e->sqnum = aeb->sqnum;
 			ubi->lookuptbl[e->pnum] = e;
 
 			if (!aeb->scrub) {
@@ -1724,6 +1982,7 @@
 	if (err)
 		goto out_free;
 
+	ubi->wl_is_inited = 1;
 	return 0;
 
 out_free:
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 22b7fec..86a6149 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2084,6 +2084,26 @@
 	return 0;
 }
 
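+/*
+ * Check whether the PCIe link is still up by reading the device ID back
+ * over config space and comparing it against the cached value.
+ */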
+static int cnss_mhi_link_status(struct mhi_controller *mhi_ctrl, void *priv)
+{
+	struct cnss_pci_data *pci_priv = priv;
+	u16 device_id;
+
+	if (!pci_priv) {
+		cnss_pr_err("pci_priv is NULL\n");
+		return -EINVAL;
+	}
+
+	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
+	if (device_id != pci_priv->device_id) {
+		cnss_pr_err("PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
+			    device_id, pci_priv->device_id);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
 {
 	int ret = 0;
@@ -2161,6 +2181,87 @@
 	kfree(mhi_ctrl->irq);
 }
 
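+/*
+ * Validate a requested MHI state transition against the currently latched
+ * state bits; returns 0 if the transition is allowed, -EINVAL otherwise.
+ */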
+static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
+					enum cnss_mhi_state mhi_state)
+{
+	switch (mhi_state) {
+	case CNSS_MHI_INIT:
+		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
+			return 0;
+		break;
+	case CNSS_MHI_DEINIT:
+	case CNSS_MHI_POWER_ON:
+		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
+		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
+			return 0;
+		break;
+	case CNSS_MHI_FORCE_POWER_OFF:
+		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
+			return 0;
+		break;
+	case CNSS_MHI_POWER_OFF:
+	case CNSS_MHI_SUSPEND:
+		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
+		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
+			return 0;
+		break;
+	case CNSS_MHI_RESUME:
+		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
+			return 0;
+		break;
+	case CNSS_MHI_TRIGGER_RDDM:
+		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
+		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
+			return 0;
+		break;
+	case CNSS_MHI_RDDM_DONE:
+		return 0;
+	default:
+		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
+			    cnss_mhi_state_to_str(mhi_state), mhi_state);
+	}
+
+	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
+		    cnss_mhi_state_to_str(mhi_state), mhi_state,
+		    pci_priv->mhi_state);
+
+	return -EINVAL;
+}
+
+static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
+				       enum cnss_mhi_state mhi_state)
+{
+	switch (mhi_state) {
+	case CNSS_MHI_INIT:
+		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_DEINIT:
+		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_POWER_ON:
+		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_POWER_OFF:
+	case CNSS_MHI_FORCE_POWER_OFF:
+		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_SUSPEND:
+		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_RESUME:
+		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
+		break;
+	case CNSS_MHI_TRIGGER_RDDM:
+		break;
+	case CNSS_MHI_RDDM_DONE:
+		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
+		break;
+	default:
+		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
+	}
+}
+
 int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
 			   enum cnss_mhi_state mhi_state)
 {
@@ -2234,74 +2335,8 @@
 	return ret;
 }
 
-static int cnss_mhi_link_status(struct mhi_controller *mhi_ctrl, void *priv)
-{
-	struct cnss_pci_data *pci_priv = priv;
-	u16 device_id;
 
-	if (!pci_priv) {
-		cnss_pr_err("pci_priv is NULL\n");
-		return -EINVAL;
-	}
-
-	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
-	if (device_id != pci_priv->device_id)  {
-		cnss_pr_err("PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
-			    device_id, pci_priv->device_id);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
-					enum cnss_mhi_state mhi_state)
-{
-	switch (mhi_state) {
-	case CNSS_MHI_INIT:
-		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
-			return 0;
-		break;
-	case CNSS_MHI_DEINIT:
-	case CNSS_MHI_POWER_ON:
-		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
-		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
-			return 0;
-		break;
-	case CNSS_MHI_FORCE_POWER_OFF:
-		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
-			return 0;
-		break;
-	case CNSS_MHI_POWER_OFF:
-	case CNSS_MHI_SUSPEND:
-		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
-		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
-			return 0;
-		break;
-	case CNSS_MHI_RESUME:
-		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
-			return 0;
-		break;
-	case CNSS_MHI_TRIGGER_RDDM:
-		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
-		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
-			return 0;
-		break;
-	case CNSS_MHI_RDDM_DONE:
-		return 0;
-	default:
-		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
-			    cnss_mhi_state_to_str(mhi_state), mhi_state);
-	}
-
-	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
-		    cnss_mhi_state_to_str(mhi_state), mhi_state,
-		    pci_priv->mhi_state);
-
-	return -EINVAL;
-}
-
-static int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
+int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
 {
 	int ret;
 	struct cnss_plat_data *plat_priv;
@@ -2341,39 +2376,6 @@
 	plat_priv->ramdump_info_v2.dump_data_valid = false;
 }
 
-static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
-				       enum cnss_mhi_state mhi_state)
-{
-	switch (mhi_state) {
-	case CNSS_MHI_INIT:
-		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_DEINIT:
-		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_POWER_ON:
-		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_POWER_OFF:
-	case CNSS_MHI_FORCE_POWER_OFF:
-		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
-		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_SUSPEND:
-		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_RESUME:
-		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
-		break;
-	case CNSS_MHI_TRIGGER_RDDM:
-		break;
-	case CNSS_MHI_RDDM_DONE:
-		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
-		break;
-	default:
-		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
-	}
-}
 
 int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
 {
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index b8a8a92..c0da01f 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -1332,6 +1332,7 @@
 	static void __iomem *loopback_lbar_vir;
 	int ret, i;
 	u32 base_sel_size = 0;
+	u32 wr_ofst = 0;
 
 	switch (testcase) {
 	case MSM_PCIE_OUTPUT_PCIE_INFO:
@@ -1563,22 +1564,24 @@
 			break;
 		}
 
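+		/*
+		 * Snapshot the offset once so a concurrent writer cannot
+		 * change it between the bounds check and the register write.
+		 */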
+		wr_ofst = wr_offset;
+
 		PCIE_DBG_FS(dev,
 			"base: %s: 0x%pK\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
 			dev->res[base_sel - 1].name,
 			dev->res[base_sel - 1].base,
-			wr_offset, wr_mask, wr_value);
+			wr_ofst, wr_mask, wr_value);
 
 		base_sel_size = resource_size(dev->res[base_sel - 1].resource);
 
-		if (wr_offset >  base_sel_size - 4 ||
-			msm_pcie_check_align(dev, wr_offset))
+		if (wr_ofst >  base_sel_size - 4 ||
+			msm_pcie_check_align(dev, wr_ofst))
 			PCIE_DBG_FS(dev,
 				"PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
-				dev->rc_idx, wr_offset, base_sel_size - 4);
+				dev->rc_idx, wr_ofst, base_sel_size - 4);
 		else
 			msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
-				wr_offset, wr_mask, wr_value);
+				wr_ofst, wr_mask, wr_value);
 
 		break;
 	case MSM_PCIE_DUMP_PCIE_REGISTER_SPACE:
@@ -5545,7 +5548,7 @@
 {
 	bool child_l0s_enable = 0, child_l1_enable = 0, child_l1ss_enable = 0;
 
-	if (!pdev->subordinate || !(&pdev->subordinate->devices)) {
+	if (!pdev->subordinate || list_empty(&pdev->subordinate->devices)) {
 		PCIE_DBG(dev,
 			"PCIe: RC%d: no device connected to root complex\n",
 			dev->rc_idx);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 9d1c5cc..2564cd5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2061,8 +2061,11 @@
 
 	case IPA_IOC_GET_PHERIPHERAL_EP_INFO:
 		IPADBG("Got IPA_IOC_GET_EP_INFO\n");
-		if (ipa3_ctx->ipa_config_is_auto == false)
-			return -ENOTTY;
+		if (ipa3_ctx->ipa_config_is_auto == false) {
+			IPADBG("not an auto config: returning error\n");
+			retval = -ENOTTY;
+			break;
+		}
 		if (copy_from_user(&ep_info, (const void __user *)arg,
 			sizeof(struct ipa_ioc_get_ep_info))) {
 			IPAERR_RL("copy_from_user fails\n");
@@ -2786,7 +2789,7 @@
 	/* Set the exception path to AP */
 	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
 		ep_idx = ipa3_get_ep_mapping(client_idx);
-		if (ep_idx == -1)
+		if (ep_idx == -1 || (ep_idx >= IPA3_MAX_NUM_PIPES))
 			continue;
 
 		/* disable statuses for all modem controlled prod pipes */
@@ -5140,10 +5143,6 @@
 
 	IPADBG("user input string %s\n", dbg_buff);
 
-	/* Prevent consequent calls from trying to load the FW again. */
-	if (ipa3_is_ready())
-		return count;
-
 	/* Check MHI configuration on MDM devices */
 	if (!ipa3_is_msm_device()) {
 
@@ -5183,6 +5182,10 @@
 		}
 	}
 
+	/* Prevent consequent calls from trying to load the FW again. */
+	if (ipa3_is_ready())
+		return count;
+
 	/* Prevent multiple calls from trying to load the FW again. */
 	if (ipa3_ctx->fw_loaded) {
 		IPAERR("not load FW again\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 5df2b90..4166392 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -311,7 +311,7 @@
 	[IPA_4_0_AUTO] = {
 		/*not-used  UL_DL    CV2X  not-used, other are invalid */
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
-		{1, 63}, {1, 63}, {1, 1}, {0, 0}, {0, 0}, {0, 0} },
+		{1, 63}, {1, 63}, {2, 2}, {0, 0}, {0, 0}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
 		{10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
@@ -324,7 +324,7 @@
 	[IPA_4_0_AUTO_MHI] = {
 		/* PCIE  DDR   DMA/CV2X not used, other are invalid */
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
-		{4, 4}, {5, 5}, {1, 1}, {0, 0}, {0, 0}, {0, 0} },
+		{3, 3}, {5, 5}, {2, 2}, {0, 0}, {0, 0}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
 		{10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} },
 		[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
index 22ab30f..d045d59 100644
--- a/drivers/platform/msm/mhi_dev/mhi_uci.c
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -996,6 +996,15 @@
 	mhi_parse_state(buf, &nbytes, info);
 	add_uevent_var(env, "MHI_CHANNEL_STATE_12=%s", buf);
 
+	rc = mhi_ctrl_state_info(MHI_CLIENT_DCI_OUT, &info);
+	if (rc) {
+		pr_err("Failed to obtain channel 20 state\n");
+		return -EINVAL;
+	}
+	nbytes = 0;
+	mhi_parse_state(buf, &nbytes, info);
+	add_uevent_var(env, "MHI_CHANNEL_STATE_20=%s", buf);
+
 	return 0;
 }
 
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index c49e254..28792c2 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -57,6 +57,7 @@
 static void __iomem *msm_ps_hold;
 static phys_addr_t tcsr_boot_misc_detect;
 static void scm_disable_sdi(void);
+static bool force_warm_reboot;
 
 #ifdef CONFIG_QCOM_DLOAD_MODE
 /* Runtime could be only changed value once.
@@ -87,8 +88,6 @@
 static struct kobject dload_kobj;
 static void *dload_type_addr;
 
-static bool force_warm_reboot;
-
 static int dload_set(const char *val, const struct kernel_param *kp);
 /* interface for exporting attributes */
 struct reset_attribute {
diff --git a/drivers/power/supply/qcom/qg-battery-profile.c b/drivers/power/supply/qcom/qg-battery-profile.c
index 00a4533..8eaa5b5 100644
--- a/drivers/power/supply/qcom/qg-battery-profile.c
+++ b/drivers/power/supply/qcom/qg-battery-profile.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -398,7 +398,7 @@
 {
 	u8 table_index = charging ? TABLE_SOC_OCV1 : TABLE_SOC_OCV2;
 
-	if (!the_battery || !the_battery->profile)
+	if (!the_battery || !the_battery->profile_node)
 		return -ENODEV;
 
 	*soc = interpolate_soc(&the_battery->profile[table_index],
@@ -414,7 +414,7 @@
 	u8 table_index = charging ? TABLE_FCC1 : TABLE_FCC2;
 	u32 fcc_mah;
 
-	if (!the_battery || !the_battery->profile)
+	if (!the_battery || !the_battery->profile_node)
 		return -ENODEV;
 
 	fcc_mah = interpolate_single_row_lut(
diff --git a/drivers/regulator/rpm-smd-regulator.c b/drivers/regulator/rpm-smd-regulator.c
index b38db82..cded6e4 100644
--- a/drivers/regulator/rpm-smd-regulator.c
+++ b/drivers/regulator/rpm-smd-regulator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, 2019 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -933,10 +933,6 @@
 	if (load_mA > params[RPM_REGULATOR_PARAM_CURRENT].max)
 		load_mA = params[RPM_REGULATOR_PARAM_CURRENT].max;
 
-	rpm_vreg_lock(reg->rpm_vreg);
-	RPM_VREG_SET_PARAM(reg, CURRENT, load_mA);
-	rpm_vreg_unlock(reg->rpm_vreg);
-
 	return (load_uA >= reg->rpm_vreg->hpm_min_load)
 		? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
 }
diff --git a/drivers/soc/qcom/msm-spm.c b/drivers/soc/qcom/msm-spm.c
index 03b1c56..7c74f79 100644
--- a/drivers/soc/qcom/msm-spm.c
+++ b/drivers/soc/qcom/msm-spm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -151,7 +151,7 @@
 static void msm_spm_drv_flush_shadow(struct msm_spm_driver_data *dev,
 		unsigned int reg_index)
 {
-	if (!dev || !dev->reg_shadow)
+	if (!dev || (dev->reg_shadow == NULL))
 		return;
 
 	__raw_writel(dev->reg_shadow[reg_index],
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index ad1026c..64918e8 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -66,6 +66,7 @@
 	HW_PLATFORM_RCM	= 21,
 	HW_PLATFORM_STP = 23,
 	HW_PLATFORM_SBC = 24,
+	HW_PLATFORM_ADP = 25,
 	HW_PLATFORM_HDK = 31,
 	HW_PLATFORM_INVALID
 };
@@ -87,6 +88,7 @@
 	[HW_PLATFORM_DTV] = "DTV",
 	[HW_PLATFORM_STP] = "STP",
 	[HW_PLATFORM_SBC] = "SBC",
+	[HW_PLATFORM_ADP] = "ADP",
 	[HW_PLATFORM_HDK] = "HDK",
 };
 
diff --git a/drivers/soc/qcom/wcnss/wcnss_vreg.c b/drivers/soc/qcom/wcnss/wcnss_vreg.c
index d4d9a75..476ff60 100644
--- a/drivers/soc/qcom/wcnss/wcnss_vreg.c
+++ b/drivers/soc/qcom/wcnss/wcnss_vreg.c
@@ -585,11 +585,6 @@
 				wcnss_log(ERR, "vreg %s disable failed (%d)\n",
 				       regulators[i].name, rc);
 		}
-		/* Free the regulator source */
-		if (regulators[i].state & VREG_GET_REGULATOR_MASK)
-		regulator_put(regulators[i].regulator);
-
-		regulators[i].state = VREG_NULL_CONFIG;
 	}
 
 }
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index a094a15..5b6475a 100755
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -407,7 +407,7 @@
 		mod_delayed_work(queue, &tz->poll_queue,
 				 msecs_to_jiffies(delay));
 	else
-		cancel_delayed_work_sync(&tz->poll_queue);
+		cancel_delayed_work(&tz->poll_queue);
 }
 
 static void monitor_thermal_zone(struct thermal_zone_device *tz)
@@ -2373,7 +2373,7 @@
 
 	mutex_unlock(&thermal_list_lock);
 
-	thermal_zone_device_set_polling(NULL, tz, 0);
+	cancel_delayed_work_sync(&tz->poll_queue);
 
 	if (tz->type[0])
 		device_remove_file(&tz->device, &dev_attr_type);
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index c91a47b..84f9b5b 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -176,6 +176,7 @@
 	int edge_count;
 	bool manual_flow;
 	struct msm_geni_serial_ver_info ver_info;
+	u32 cur_tx_remaining;
 };
 
 static const struct uart_ops msm_geni_serial_pops;
@@ -779,8 +780,10 @@
 {
 	struct uart_port *uport;
 	struct msm_geni_serial_port *port;
-	int locked = 1;
+	bool locked = true;
 	unsigned long flags;
+	unsigned int geni_status;
+	int irq_en;
 
 	WARN_ON(co->index < 0 || co->index >= GENI_UART_NR_PORTS);
 
@@ -794,10 +797,44 @@
 	else
 		spin_lock_irqsave(&uport->lock, flags);
 
-	if (locked) {
-		__msm_geni_serial_console_write(uport, s, count);
-		spin_unlock_irqrestore(&uport->lock, flags);
+	geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+
+	/* Cancel the current write to log the fault */
+	if (!locked) {
+		geni_cancel_m_cmd(uport->membase);
+		if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+						M_CMD_CANCEL_EN, true)) {
+			geni_abort_m_cmd(uport->membase);
+			msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+							M_CMD_ABORT_EN, true);
+			geni_write_reg_nolog(M_CMD_ABORT_EN, uport->membase,
+							SE_GENI_M_IRQ_CLEAR);
+		}
+		writel_relaxed(M_CMD_CANCEL_EN, uport->membase +
+							SE_GENI_M_IRQ_CLEAR);
+	} else if ((geni_status & M_GENI_CMD_ACTIVE) &&
+						!port->cur_tx_remaining) {
+		/* It seems we can interrupt existing transfers unless all data
+		 * has been sent, in which case we need to look for done first.
+		 */
+		msm_geni_serial_poll_cancel_tx(uport);
+
+		/* Enable WATERMARK interrupt for every new console write op */
+		if (uart_circ_chars_pending(&uport->state->xmit)) {
+			irq_en = geni_read_reg_nolog(uport->membase,
+						SE_GENI_M_IRQ_EN);
+			geni_write_reg_nolog(irq_en | M_TX_FIFO_WATERMARK_EN,
+					uport->membase, SE_GENI_M_IRQ_EN);
+		}
 	}
+
+	__msm_geni_serial_console_write(uport, s, count);
+
+	if (port->cur_tx_remaining)
+		msm_geni_serial_setup_tx(uport, port->cur_tx_remaining);
+
+	if (locked)
+		spin_unlock_irqrestore(&uport->lock, flags);
 }
 
 static int handle_rx_console(struct uart_port *uport,
@@ -1261,42 +1298,62 @@
 	return ret;
 }
 
-static int msm_geni_serial_handle_tx(struct uart_port *uport)
+static int msm_geni_serial_handle_tx(struct uart_port *uport, bool done,
+		bool active)
 {
 	int ret = 0;
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 	struct circ_buf *xmit = &uport->state->xmit;
 	unsigned int avail_fifo_bytes = 0;
 	unsigned int bytes_remaining = 0;
+	unsigned int pending;
 	int i = 0;
 	unsigned int tx_fifo_status;
 	unsigned int xmit_size;
 	unsigned int fifo_width_bytes =
 		(uart_console(uport) ? 1 : (msm_port->tx_fifo_width >> 3));
 	int temp_tail = 0;
+	int irq_en;
 
-	xmit_size = uart_circ_chars_pending(xmit);
 	tx_fifo_status = geni_read_reg_nolog(uport->membase,
 					SE_GENI_TX_FIFO_STATUS);
-	/* Both FIFO and framework buffer are drained */
-	if (!xmit_size && !tx_fifo_status) {
+
+	/* Complete the current tx command before taking newly added data */
+	if (active)
+		pending = msm_port->cur_tx_remaining;
+	else
+		pending = uart_circ_chars_pending(xmit);
+
+	/* All data has been transmitted and acknowledged as received */
+	if (!pending && !tx_fifo_status && done) {
 		msm_geni_serial_stop_tx(uport);
 		goto exit_handle_tx;
 	}
 
-	avail_fifo_bytes = (msm_port->tx_fifo_depth - msm_port->tx_wm) *
-							fifo_width_bytes;
-	temp_tail = xmit->tail & (UART_XMIT_SIZE - 1);
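+	/*
+	 * Derive free space from the live FIFO word count rather than the
+	 * static watermark, so the FIFO is never overfilled mid-transfer.
+	 */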
+	avail_fifo_bytes = msm_port->tx_fifo_depth - (tx_fifo_status &
+								TX_FIFO_WC);
+	avail_fifo_bytes *= fifo_width_bytes;
+	/* avail_fifo_bytes is unsigned; use a signed test to catch underflow */
+	if ((int)avail_fifo_bytes < 0)
+		avail_fifo_bytes = 0;
 
-	if (xmit_size > (UART_XMIT_SIZE - temp_tail))
-		xmit_size = (UART_XMIT_SIZE - temp_tail);
-	if (xmit_size > avail_fifo_bytes)
-		xmit_size = avail_fifo_bytes;
+	temp_tail = xmit->tail;
+	xmit_size = min(avail_fifo_bytes, pending);
 	if (!xmit_size)
 		goto exit_handle_tx;
 
-	msm_geni_serial_setup_tx(uport, xmit_size);
+	if (!msm_port->cur_tx_remaining) {
+		msm_geni_serial_setup_tx(uport, pending);
+		msm_port->cur_tx_remaining = pending;
+
+		/* Re-Enable WATERMARK interrupt while starting new transfer */
+		irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
+		if (!(irq_en & M_TX_FIFO_WATERMARK_EN))
+			geni_write_reg_nolog(irq_en | M_TX_FIFO_WATERMARK_EN,
+					uport->membase, SE_GENI_M_IRQ_EN);
+	}
+
 	bytes_remaining = xmit_size;
+
 	while (i < xmit_size) {
 		unsigned int tx_bytes;
 		unsigned int buf = 0;
@@ -1305,20 +1362,39 @@
 		tx_bytes = ((bytes_remaining < fifo_width_bytes) ?
 					bytes_remaining : fifo_width_bytes);
 
-		for (c = 0; c < tx_bytes ; c++)
-			buf |= (xmit->buf[temp_tail + c] << (c * 8));
+		for (c = 0; c < tx_bytes ; c++) {
+			buf |= (xmit->buf[temp_tail++] << (c * 8));
+		for (c = 0; c < tx_bytes; c++) {
+		}
+
 		geni_write_reg_nolog(buf, uport->membase, SE_GENI_TX_FIFOn);
+
 		i += tx_bytes;
 		bytes_remaining -= tx_bytes;
 		uport->icount.tx += tx_bytes;
-		temp_tail += tx_bytes;
+		msm_port->cur_tx_remaining -= tx_bytes;
 		/* Ensure FIFO write goes through */
 		wmb();
 	}
-	xmit->tail = temp_tail & (UART_XMIT_SIZE - 1);
-	if (uart_console(uport))
-		msm_geni_serial_poll_cancel_tx(uport);
+	xmit->tail = temp_tail;
+
+	/*
+	 * The tx fifo watermark is level triggered and latched. Though we had
+	 * cleared it in qcom_geni_serial_isr it will have already reasserted
+	 * so we must clear it again here after our writes.
+	 */
+	geni_write_reg_nolog(M_TX_FIFO_WATERMARK_EN, uport->membase,
+						SE_GENI_M_IRQ_CLEAR);
+
 exit_handle_tx:
+	if (!msm_port->cur_tx_remaining) {
+		/* Clear WATERMARK interrupt for each transfer completion */
+		irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
+		if (irq_en & M_TX_FIFO_WATERMARK_EN)
+			geni_write_reg_nolog(irq_en & ~M_TX_FIFO_WATERMARK_EN,
+					uport->membase, SE_GENI_M_IRQ_EN);
+	}
+
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(uport);
 	return ret;
@@ -1414,6 +1490,7 @@
 	struct uart_port *uport = dev;
 	unsigned long flags;
 	unsigned int m_irq_en;
+	unsigned int geni_status;
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 	struct tty_port *tport = &uport->state->port;
 	bool drop_rx = false;
@@ -1435,6 +1512,7 @@
 	dma = geni_read_reg_nolog(uport->membase, SE_GENI_DMA_MODE_EN);
 	dma_tx_status = geni_read_reg_nolog(uport->membase, SE_DMA_TX_IRQ_STAT);
 	dma_rx_status = geni_read_reg_nolog(uport->membase, SE_DMA_RX_IRQ_STAT);
+	geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
 
 	geni_write_reg_nolog(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
 	geni_write_reg_nolog(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
@@ -1454,8 +1532,10 @@
 
 	if (!dma) {
 		if ((m_irq_status & m_irq_en) &
-		    (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
-			msm_geni_serial_handle_tx(uport);
+			(M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
+			msm_geni_serial_handle_tx(uport,
+					m_irq_status & M_CMD_DONE_EN,
+					geni_status & M_GENI_CMD_ACTIVE);
 
 		if ((s_irq_status & S_GP_IRQ_0_EN) ||
 			(s_irq_status & S_GP_IRQ_1_EN)) {
diff --git a/drivers/tty/serial/msm_smd_tty.c b/drivers/tty/serial/msm_smd_tty.c
index 84ee1dd..c9f8af1 100644
--- a/drivers/tty/serial/msm_smd_tty.c
+++ b/drivers/tty/serial/msm_smd_tty.c
@@ -1,5 +1,5 @@
 /* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2015, 2017, 2019, The Linux Foundation. All rights reserved.
  * Author: Brian Swetland <swetland@google.com>
  *
  * This software is licensed under the terms of the GNU General Public
@@ -370,7 +370,7 @@
 	int n;
 
 	for (n = 0; n < MAX_SMD_TTYS; ++n) {
-		if (!smd_tty[n].dev_name)
+		if (smd_tty[n].dev_name == NULL)
 			continue;
 
 		if (pdev->id == smd_tty[n].edge &&
@@ -504,7 +504,7 @@
 	struct smd_tty_info *info;
 	const char *peripheral = NULL;
 
-	if (n >= MAX_SMD_TTYS || !smd_tty[n].ch_name)
+	if (n >= MAX_SMD_TTYS || smd_tty[n].ch_name == NULL)
 		return -ENODEV;
 
 	info = smd_tty + n;
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 51413dc..1acfd3a 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -2429,9 +2429,12 @@
 {
 	struct f_gsi	 *gsi = func_to_gsi(f);
 	struct f_gsi	 *gsi_rmnet_v2x = __gsi[USB_PROT_RMNET_V2X_IPA];
+	struct f_gsi	 *gsi_ecm = __gsi[USB_PROT_ECM_IPA];
 	struct usb_composite_dev *cdev = f->config->cdev;
 	struct net_device	*net;
 	int ret;
+	int in_intr_num = 0;
+	int out_intr_num = 0;
 
 	log_event_dbg("intf=%u, alt=%u", intf, alt);
 
@@ -2504,21 +2507,43 @@
 			}
 
 			/*
-			 * Configure EPs for GSI. Note that when both RmNet LTE
-			 * (or ECM) and RmNet V2X instances are enabled in a
-			 * composition, configure HW accelerated EPs for V2X
-			 * instance and normal EPs for LTE (or ECM).
+			 * Configure EPs for GSI. Note that:
+			 * 1. In general, configure HW accelerated EPs for all
+			 *    instances.
+			 * 2. If both RmNet LTE and RmNet V2X instances are
+			 *    enabled in a composition, configure HW accelerated
+			 *    EPs for V2X and normal EPs for LTE.
+			 * 3. If RmNet V2X, ECM and ADPL instances are enabled
+			 *    in a composition, configure HW accelerated EPs in
+			 *    both directions for V2X and IN direction for ECM.
+			 *    Configure normal EPs for ECM OUT and ADPL.
 			 */
+			switch (gsi->prot_id) {
+			case USB_PROT_RMNET_IPA:
+				if (!gsi_rmnet_v2x->function.fs_descriptors) {
+					in_intr_num = 2;
+					out_intr_num = 1;
+				}
+				break;
+			case USB_PROT_ECM_IPA:
+				in_intr_num = 2;
+				if (!gsi_rmnet_v2x->function.fs_descriptors)
+					out_intr_num = 1;
+				break;
+			case USB_PROT_DIAG_IPA:
+				if (!(gsi_ecm->function.fs_descriptors &&
+					gsi_rmnet_v2x->function.fs_descriptors))
+					in_intr_num = 3;
+				break;
+			default:
+				in_intr_num = 2;
+				out_intr_num = 1;
+			}
+
+			/* gsi_configure_ep required only for GSI-IPA EPs */
 			if (gsi->d_port.in_ep &&
 				gsi->prot_id <= USB_PROT_RMNET_V2X_IPA) {
-				if (gsi->prot_id == USB_PROT_DIAG_IPA)
-					gsi->d_port.in_ep->ep_intr_num = 3;
-				else if ((gsi->prot_id == USB_PROT_RMNET_IPA ||
-					 gsi->prot_id == USB_PROT_ECM_IPA) &&
-					 gsi_rmnet_v2x->function.fs_descriptors)
-					gsi->d_port.in_ep->ep_intr_num = 0;
-				else
-					gsi->d_port.in_ep->ep_intr_num = 2;
+				gsi->d_port.in_ep->ep_intr_num = in_intr_num;
 				usb_gsi_ep_op(gsi->d_port.in_ep,
 					&gsi->d_port.in_request,
 						GSI_EP_OP_CONFIG);
@@ -2526,12 +2551,7 @@
 
 			if (gsi->d_port.out_ep &&
 				gsi->prot_id <= USB_PROT_RMNET_V2X_IPA) {
-				if ((gsi->prot_id == USB_PROT_RMNET_IPA ||
-				     gsi->prot_id == USB_PROT_ECM_IPA) &&
-				     gsi_rmnet_v2x->function.fs_descriptors)
-					gsi->d_port.out_ep->ep_intr_num = 0;
-				else
-					gsi->d_port.out_ep->ep_intr_num = 1;
+				gsi->d_port.out_ep->ep_intr_num = out_intr_num;
 				usb_gsi_ep_op(gsi->d_port.out_ep,
 					&gsi->d_port.out_request,
 						GSI_EP_OP_CONFIG);
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 3e9b7eb..3a5dd02 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -2,7 +2,7 @@
  * Core MDSS framebuffer driver.
  *
  * Copyright (C) 2007 Google Incorporated
- * Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -3579,6 +3579,9 @@
 {
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
 
+	if (!mfd)
+		return -EPERM;
+
 	if (!mfd->op_enable)
 		return -EPERM;
 
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index e72a315..5767ca1 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -600,6 +600,7 @@
 	struct mutex flush_lock;
 	struct mutex *shared_lock;
 	struct mutex rsrc_lock;
+	struct mutex vsync_handler_lock;
 	spinlock_t spin_lock;
 
 	struct mdss_panel_data *panel_data;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 029634b..75c2b6f 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2452,6 +2452,7 @@
 			mutex_init(&ctl->offlock);
 			mutex_init(&ctl->flush_lock);
 			mutex_init(&ctl->rsrc_lock);
+			mutex_init(&ctl->vsync_handler_lock);
 			spin_lock_init(&ctl->spin_lock);
 			BLOCKING_INIT_NOTIFIER_HEAD(&ctl->notifier_head);
 			pr_debug("alloc ctl_num=%d\n", ctl->num);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 1c70637..9e27bb1 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -1982,6 +1982,49 @@
 	pr_debug("%pS->%s ctl:%d\n",
 		__builtin_return_address(0), __func__, ctl->num);
 
+	mutex_lock(&ctl->vsync_handler_lock);
+
+	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), 0x88888);
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl)
+		sctx = (struct mdss_mdp_cmd_ctx *) sctl->intf_ctx[MASTER_CTX];
+
+	spin_lock_irqsave(&ctx->clk_lock, flags);
+	if (handle->enabled) {
+		handle->enabled = false;
+		list_del_init(&handle->list);
+		disable_vsync_irq = !handle->cmd_post_flush;
+	}
+	spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+	if (disable_vsync_irq) {
+		/* disable rd_ptr interrupt and clocks */
+		mdss_mdp_setup_vsync(ctx, false);
+		complete(&ctx->stop_comp);
+	}
+
+	mutex_unlock(&ctl->vsync_handler_lock);
+
+	return 0;
+}
+
+static int mdss_mdp_cmd_remove_vsync_handler_nolock(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_vsync_handler *handle)
+{
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
+	unsigned long flags;
+	bool disable_vsync_irq = false;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+	if (!ctx) {
+		pr_err("%s: invalid ctx\n", __func__);
+		return -ENODEV;
+	}
+
+	pr_debug("%pS->%s ctl:%d\n",
+		__builtin_return_address(0), __func__, ctl->num);
+
 	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), 0x88888);
 	sctl = mdss_mdp_get_split_ctl(ctl);
 	if (sctl)
@@ -3216,8 +3259,12 @@
 		return -ENODEV;
 	}
 
-	list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list)
-		mdss_mdp_cmd_remove_vsync_handler(ctl, handle);
+	mutex_lock(&ctl->vsync_handler_lock);
+	list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list) {
+		mdss_mdp_cmd_remove_vsync_handler_nolock(ctl, handle);
+	}
+	mutex_unlock(&ctl->vsync_handler_lock);
+
 	if (mdss_mdp_is_lineptr_supported(ctl))
 		mdss_mdp_cmd_lineptr_ctrl(ctl, false);
 	MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), XLOG_FUNC_ENTRY);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index 1cb4389..c2e3b18 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -3133,7 +3133,7 @@
 	}
 
 	if (!ad->bl_mfd || !ad->bl_mfd->panel_info ||
-		!ad->bl_att_lut) {
+		ad->bl_att_lut == NULL) {
 		pr_err("Invalid ad info: bl_mfd = 0x%pK, ad->bl_mfd->panel_info = 0x%pK, bl_att_lut = 0x%pK\n",
 			ad->bl_mfd,
 			(!ad->bl_mfd) ? NULL : ad->bl_mfd->panel_info,
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
old mode 100755
new mode 100644
index 1781989..a710c73
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -530,6 +530,10 @@
 	struct bio *bio = *fio->bio;
 	struct page *page = fio->encrypted_page ?
 			fio->encrypted_page : fio->page;
+	struct inode *inode;
+	bool bio_encrypted;
+	int bi_crypt_skip;
+	u64 dun;
 
 	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
 			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
@@ -538,16 +542,29 @@
 	trace_f2fs_submit_page_bio(page, fio);
 	f2fs_trace_ios(fio, 0);
 
+	inode = fio->page->mapping->host;
+	dun = PG_DUN(inode, fio->page);
+	bi_crypt_skip = fio->encrypted_page ? 1 : 0;
+	bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
+	fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
+
 	if (bio && (*fio->last_block + 1 != fio->new_blkaddr ||
 			!__same_bdev(fio->sbi, fio->new_blkaddr, bio))) {
 		__submit_bio(fio->sbi, bio, fio->type);
 		bio = NULL;
 	}
+	/* ICE support */
+	if (bio && !fscrypt_mergeable_bio(bio, dun,
+				bio_encrypted, bi_crypt_skip)) {
+		__submit_bio(fio->sbi, bio, fio->type);
+		bio = NULL;
+	}
 alloc_new:
 	if (!bio) {
 		bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
 				BIO_MAX_PAGES, false, fio->type, fio->temp);
 		bio_set_op_attrs(bio, fio->op, fio->op_flags);
+		if (bio_encrypted)
+			fscrypt_set_ice_dun(inode, bio, dun);
+		fscrypt_set_ice_skip(bio, bi_crypt_skip);
 	}
 
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 1daeacb..0c51f91 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -794,6 +794,25 @@
 	size_t size;
 };
 
+struct cec_adapter;
+struct edid;
+
+/**
+ * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
+ * @lock: mutex protecting this struct
+ * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
+ * @name: name of the CEC adapter
+ * @parent: parent device of the CEC adapter
+ * @unregister_work: unregister the CEC adapter
+ */
+struct drm_dp_aux_cec {
+	struct mutex lock;
+	struct cec_adapter *adap;
+	const char *name;
+	struct device *parent;
+	struct delayed_work unregister_work;
+};
+
 /**
  * struct drm_dp_aux - DisplayPort AUX channel
  * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
@@ -846,6 +865,10 @@
 	 * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
 	 */
 	unsigned i2c_defer_count;
+	/**
+	 * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
+	 */
+	struct drm_dp_aux_cec cec;
 };
 
 ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
@@ -914,4 +937,37 @@
 int drm_dp_aux_register(struct drm_dp_aux *aux);
 void drm_dp_aux_unregister(struct drm_dp_aux *aux);
 
+#ifdef CONFIG_DRM_DP_CEC
+void drm_dp_cec_irq(struct drm_dp_aux *aux);
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
+				   struct device *parent);
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
+#else
+static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+						 const char *name,
+						 struct device *parent)
+{
+}
+
+static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
+				       const struct edid *edid)
+{
+}
+
+static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+}
+
+#endif
+
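+/*
+ * Illustrative usage sketch (not part of this patch): a DP connector
+ * driver would typically register the CEC adapter once the AUX channel
+ * is up, feed it EDID state on hotplug, and forward CEC interrupts:
+ *
+ *	drm_dp_cec_register_connector(&aux, connector->name, dev);
+ *	drm_dp_cec_set_edid(&aux, edid);	(after reading the EDID)
+ *	drm_dp_cec_irq(&aux);			(from the HPD IRQ handler)
+ *	drm_dp_cec_unset_edid(&aux);		(on unplug)
+ *	drm_dp_cec_unregister_connector(&aux);	(on teardown)
+ */
+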
 #endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index e04e291..90e8394 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -148,10 +148,10 @@
  * a new RANGE of SSIDs to the msg_mask_tbl.
  */
 #define MSG_MASK_TBL_CNT		26
-#define APPS_EVENT_LAST_ID		0xCB4
+#define APPS_EVENT_LAST_ID		0xCB7
 
 #define MSG_SSID_0			0
-#define MSG_SSID_0_LAST			130
+#define MSG_SSID_0_LAST			131
 #define MSG_SSID_1			500
 #define MSG_SSID_1_LAST			506
 #define MSG_SSID_2			1000
@@ -354,13 +354,15 @@
 	MSG_LVL_MED,
 	MSG_LVL_HIGH,
 	MSG_LVL_LOW,
-	MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
 	MSG_LVL_HIGH,
 	MSG_LVL_LOW,
 	MSG_LVL_MED,
 	MSG_LVL_MED,
 	MSG_LVL_HIGH,
-	MSG_LVL_HIGH
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR
 };
 
 static const uint32_t msg_bld_masks_1[] = {
@@ -922,7 +924,7 @@
 /* LOG CODES */
 static const uint32_t log_code_last_tbl[] = {
 	0x0,	/* EQUIP ID 0 */
-	0x1CB2,	/* EQUIP ID 1 */
+	0x1CC0,	/* EQUIP ID 1 */
 	0x0,	/* EQUIP ID 2 */
 	0x0,	/* EQUIP ID 3 */
 	0x4910,	/* EQUIP ID 4 */
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
new file mode 100644
index 0000000..e083d57
--- /dev/null
+++ b/include/linux/mhi.h
@@ -0,0 +1,812 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MHI_H_
+#define _MHI_H_
+
+struct mhi_chan;
+struct mhi_event;
+struct mhi_ctxt;
+struct mhi_cmd;
+struct image_info;
+struct bhi_vec_entry;
+struct mhi_timesync;
+struct mhi_buf_info;
+
+/**
+ * enum MHI_CB - MHI callback
+ * @MHI_CB_IDLE: MHI entered idle state
+ * @MHI_CB_PENDING_DATA: New data available for client to process
+ * @MHI_CB_DTR_SIGNAL: DTR signaling update
+ * @MHI_CB_LPM_ENTER: MHI host entered low power mode
+ * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
+ * @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
+ * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode EE
+ * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
+ * @MHI_CB_FATAL_ERROR: MHI device entered fatal error
+ */
+enum MHI_CB {
+	MHI_CB_IDLE,
+	MHI_CB_PENDING_DATA,
+	MHI_CB_DTR_SIGNAL,
+	MHI_CB_LPM_ENTER,
+	MHI_CB_LPM_EXIT,
+	MHI_CB_EE_RDDM,
+	MHI_CB_EE_MISSION_MODE,
+	MHI_CB_SYS_ERROR,
+	MHI_CB_FATAL_ERROR,
+};
+
+/**
+ * enum MHI_DEBUG_LEVEL - various debugging levels
+ */
+enum MHI_DEBUG_LEVEL {
+	MHI_MSG_LVL_VERBOSE,
+	MHI_MSG_LVL_INFO,
+	MHI_MSG_LVL_ERROR,
+	MHI_MSG_LVL_CRITICAL,
+	MHI_MSG_LVL_MASK_ALL,
+};
+
+/**
+ * enum MHI_FLAGS - Transfer flags
+ * @MHI_EOB: End of buffer for bulk transfer
+ * @MHI_EOT: End of transfer
+ * @MHI_CHAIN: Linked transfer
+ */
+enum MHI_FLAGS {
+	MHI_EOB,
+	MHI_EOT,
+	MHI_CHAIN,
+};
+
+/**
+ * enum mhi_device_type - Device types
+ * @MHI_XFER_TYPE: Handles data transfer
+ * @MHI_TIMESYNC_TYPE: Used for the timesync feature
+ * @MHI_CONTROLLER_TYPE: Control device
+ */
+enum mhi_device_type {
+	MHI_XFER_TYPE,
+	MHI_TIMESYNC_TYPE,
+	MHI_CONTROLLER_TYPE,
+};
+
+/**
+ * enum mhi_ee - device current execution environment
+ * @MHI_EE_PBL: device in PBL
+ * @MHI_EE_SBL: device in SBL
+ * @MHI_EE_AMSS: device in mission mode (firmware fully loaded)
+ * @MHI_EE_RDDM: device in ram dump collection mode
+ * @MHI_EE_WFW: device in WLAN firmware mode
+ * @MHI_EE_PTHRU: device in PBL but configured in pass thru mode
+ * @MHI_EE_EDL: device in emergency download mode
+ */
+enum mhi_ee {
+	MHI_EE_PBL,
+	MHI_EE_SBL,
+	MHI_EE_AMSS,
+	MHI_EE_RDDM,
+	MHI_EE_WFW,
+	MHI_EE_PTHRU,
+	MHI_EE_EDL,
+	MHI_EE_MAX_SUPPORTED = MHI_EE_EDL,
+	MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
+	MHI_EE_NOT_SUPPORTED,
+	MHI_EE_MAX,
+};
+
+/**
+ * enum mhi_dev_state - device current MHI state
+ */
+enum mhi_dev_state {
+	MHI_STATE_RESET = 0x0,
+	MHI_STATE_READY = 0x1,
+	MHI_STATE_M0 = 0x2,
+	MHI_STATE_M1 = 0x3,
+	MHI_STATE_M2 = 0x4,
+	MHI_STATE_M3 = 0x5,
+	MHI_STATE_M3_FAST = 0x6,
+	MHI_STATE_BHI  = 0x7,
+	MHI_STATE_SYS_ERR  = 0xFF,
+	MHI_STATE_MAX,
+};
+
+/**
+ * struct mhi_link_info - bandwidth requirement
+ * @target_link_speed: as defined by TLS bits in LinkControl reg
+ * @target_link_width: as defined by NLW bits in LinkStatus reg
+ * @sequence_num: used by device to track bw requests sent to host
+ */
+struct mhi_link_info {
+	unsigned int target_link_speed;
+	unsigned int target_link_width;
+	int sequence_num;
+};
+
+#define MHI_VOTE_BUS BIT(0) /* do not disable the bus */
+#define MHI_VOTE_DEVICE BIT(1) /* prevent mhi device from entering lpm */
+
+/**
+ * struct image_info - firmware and rddm table
+ * @mhi_buf: Contains device firmware and rddm table
+ * @bhi_vec: Points to the BHIe vector table
+ * @entries: # of entries in table
+ */
+struct image_info {
+	struct mhi_buf *mhi_buf;
+	struct bhi_vec_entry *bhi_vec;
+	u32 entries;
+};
+
+/**
+ * struct mhi_controller - Master controller structure for external modem
+ * @dev: Device associated with this controller
+ * @of_node: DT that has MHI configuration information
+ * @regs: Points to base of MHI MMIO register space
+ * @bhi: Points to base of MHI BHI register space
+ * @bhie: Points to base of MHI BHIe register space
+ * @wake_db: MHI WAKE doorbell register address
+ * @dev_id: PCIe device id of the external device
+ * @domain: PCIe domain the device connected to
+ * @bus: PCIe bus the device assigned to
+ * @slot: PCIe slot for the modem
+ * @iova_start: IOMMU starting address for data
+ * @iova_stop: IOMMU stop address for data
+ * @fw_image: Firmware image name for normal booting
+ * @edl_image: Firmware image name for emergency download mode
+ * @fbc_download: MHI host needs to do complete image transfer
+ * @rddm_size: RAM dump size that host should allocate for debugging purpose
+ * @sbl_size: SBL image size
+ * @seg_len: BHIe vector size
+ * @fbc_image: Points to firmware image buffer
+ * @rddm_image: Points to RAM dump buffer
+ * @max_chan: Maximum number of channels controller support
+ * @mhi_chan: Points to channel configuration table
+ * @lpm_chans: List of channels that require LPM notifications
+ * @total_ev_rings: Total # of event rings allocated
+ * @hw_ev_rings: Number of hardware event rings
+ * @sw_ev_rings: Number of software event rings
+ * @msi_required: Number of msi required to operate
+ * @msi_allocated: Number of msi allocated by bus master
+ * @irq: base irq # to request
+ * @mhi_event: MHI event ring configurations table
+ * @mhi_cmd: MHI command ring configurations table
+ * @mhi_ctxt: MHI device context, shared memory between host and device
+ * @timeout_ms: Timeout in ms for state transitions
+ * @pm_state: Power management state
+ * @ee: MHI device execution environment
+ * @dev_state: MHI STATE
+ * @mhi_link_info: requested link bandwidth by device
+ * @status_cb: CB function to notify various power states to bus master
+ * @link_status: Query link status in case of abnormal value read from device
+ * @runtime_get: Async runtime resume function
+ * @runtime_put: Release runtime votes
+ * @time_get: Return host time in us
+ * @lpm_disable: Request controller to disable link level low power modes
+ * @lpm_enable: Controller may enable link level low power modes again
+ * @priv_data: Points to bus master's private data
+ */
+struct mhi_controller {
+	struct list_head node;
+	struct mhi_device *mhi_dev;
+
+	/* device node for iommu ops */
+	struct device *dev;
+	struct device_node *of_node;
+
+	/* mmio base */
+	phys_addr_t base_addr;
+	void __iomem *regs;
+	void __iomem *bhi;
+	void __iomem *bhie;
+	void __iomem *wake_db;
+	void __iomem *bw_scale_db;
+
+	/* device topology */
+	u32 dev_id;
+	u32 domain;
+	u32 bus;
+	u32 slot;
+	u32 family_number;
+	u32 device_number;
+	u32 major_version;
+	u32 minor_version;
+
+	/* addressing window */
+	dma_addr_t iova_start;
+	dma_addr_t iova_stop;
+
+	/* fw images */
+	const char *fw_image;
+	const char *edl_image;
+
+	/* mhi host manages downloading entire fbc images */
+	bool fbc_download;
+	size_t rddm_size;
+	size_t sbl_size;
+	size_t seg_len;
+	u32 session_id;
+	u32 sequence_id;
+	struct image_info *fbc_image;
+	struct image_info *rddm_image;
+
+	/* physical channel config data */
+	u32 max_chan;
+	struct mhi_chan *mhi_chan;
+	struct list_head lpm_chans; /* these chan require lpm notification */
+
+	/* physical event config data */
+	u32 total_ev_rings;
+	u32 hw_ev_rings;
+	u32 sw_ev_rings;
+	u32 msi_required;
+	u32 msi_allocated;
+	int *irq; /* interrupt table */
+	struct mhi_event *mhi_event;
+	struct list_head lp_ev_rings; /* low priority event rings */
+
+	/* cmd rings */
+	struct mhi_cmd *mhi_cmd;
+
+	/* mhi context (shared with device) */
+	struct mhi_ctxt *mhi_ctxt;
+
+	u32 timeout_ms;
+
+	/* caller should grab pm_mutex for suspend/resume operations */
+	struct mutex pm_mutex;
+	bool pre_init;
+	rwlock_t pm_lock;
+	u32 pm_state;
+	u32 saved_pm_state; /* saved state during fast suspend */
+	u32 db_access; /* db access only on these states */
+	enum mhi_ee ee;
+	u32 ee_table[MHI_EE_MAX]; /* ee conversion from dev to host */
+	enum mhi_dev_state dev_state;
+	enum mhi_dev_state saved_dev_state;
+	bool wake_set;
+	atomic_t dev_wake;
+	atomic_t alloc_size;
+	atomic_t pending_pkts;
+	struct list_head transition_list;
+	spinlock_t transition_lock;
+	spinlock_t wlock;
+
+	/* target bandwidth info */
+	struct mhi_link_info mhi_link_info;
+
+	/* debug counters */
+	u32 M0, M2, M3, M3_FAST;
+
+	/* worker for different state transitions */
+	struct work_struct st_worker;
+	struct work_struct fw_worker;
+	struct work_struct syserr_worker;
+	struct work_struct low_priority_worker;
+	wait_queue_head_t state_event;
+
+	/* shadow functions */
+	void (*status_cb)(struct mhi_controller *, void *, enum MHI_CB);
+	int (*link_status)(struct mhi_controller *, void *);
+	void (*wake_get)(struct mhi_controller *, bool);
+	void (*wake_put)(struct mhi_controller *, bool);
+	void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
+	int (*runtime_get)(struct mhi_controller *, void *);
+	void (*runtime_put)(struct mhi_controller *, void *);
+	u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv);
+	int (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv);
+	int (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv);
+	int (*map_single)(struct mhi_controller *mhi_cntrl,
+			  struct mhi_buf_info *buf);
+	void (*unmap_single)(struct mhi_controller *mhi_cntrl,
+			     struct mhi_buf_info *buf);
+	void (*tsync_log)(struct mhi_controller *mhi_cntrl, u64 remote_time);
+	int (*bw_scale)(struct mhi_controller *mhi_cntrl,
+			struct mhi_link_info *link_info);
+
+	/* channel to control DTR messaging */
+	struct mhi_device *dtr_dev;
+
+	/* bounce buffer settings */
+	bool bounce_buf;
+	size_t buffer_len;
+
+	/* supports time sync feature */
+	struct mhi_timesync *mhi_tsync;
+	struct mhi_device *tsync_dev;
+	u64 local_timer_freq;
+	u64 remote_timer_freq;
+
+	/* kernel log level */
+	enum MHI_DEBUG_LEVEL klog_lvl;
+
+	/* private log level controller driver to set */
+	enum MHI_DEBUG_LEVEL log_lvl;
+
+	/* controller specific data */
+	void *priv_data;
+	void *log_buf;
+	struct dentry *dentry;
+	struct dentry *parent;
+};
+
+/**
+ * struct mhi_device - mhi device structure bound to a channel
+ * @dev: Device associated with the channels
+ * @mtu: Maximum # of bytes controller support
+ * @ul_chan_id: MHI channel id for UL transfer
+ * @dl_chan_id: MHI channel id for DL transfer
+ * @tiocm: Device current terminal settings
+ * @early_notif: This device needs an early notification in case of an error
+ * with the external modem.
+ * @dev_vote: Keep external device in active state
+ * @bus_vote: Keep physical bus (pci, spi) in active state
+ * @priv: Driver private data
+ */
+struct mhi_device {
+	struct device dev;
+	u32 dev_id;
+	u32 domain;
+	u32 bus;
+	u32 slot;
+	size_t mtu;
+	int ul_chan_id;
+	int dl_chan_id;
+	int ul_event_id;
+	int dl_event_id;
+	u32 tiocm;
+	bool early_notif;
+	const struct mhi_device_id *id;
+	const char *chan_name;
+	struct mhi_controller *mhi_cntrl;
+	struct mhi_chan *ul_chan;
+	struct mhi_chan *dl_chan;
+	atomic_t dev_vote;
+	atomic_t bus_vote;
+	enum mhi_device_type dev_type;
+	void *priv_data;
+	int (*ul_xfer)(struct mhi_device *, struct mhi_chan *, void *,
+		       size_t, enum MHI_FLAGS);
+	int (*dl_xfer)(struct mhi_device *, struct mhi_chan *, void *,
+		       size_t, enum MHI_FLAGS);
+	void (*status_cb)(struct mhi_device *, enum MHI_CB);
+};
+
+/**
+ * struct mhi_result - Completed buffer information
+ * @buf_addr: Address of data buffer
+ * @dir: Channel direction
+ * @bytes_xferd: # of bytes transferred
+ * @transaction_status: Status of the last transfer
+ */
+struct mhi_result {
+	void *buf_addr;
+	enum dma_data_direction dir;
+	size_t bytes_xferd;
+	int transaction_status;
+};
+
+/**
+ * struct mhi_buf - Describes the buffer
+ * @page: buffer as a page
+ * @buf: cpu address for the buffer
+ * @phys_addr: physical address of the buffer
+ * @dma_addr: iommu address for the buffer
+ * @skb: skb of ip packet
+ * @len: # of bytes
+ * @name: Buffer label, for offload channel configurations name must be:
+ * ECA - Event context array data
+ * CCA - Channel context array data
+ */
+struct mhi_buf {
+	struct list_head node;
+	struct page *page;
+	void *buf;
+	phys_addr_t phys_addr;
+	dma_addr_t dma_addr;
+	struct sk_buff *skb;
+	size_t len;
+	const char *name; /* ECA, CCA */
+};
+
+/**
+ * struct mhi_driver - mhi driver information
+ * @id_table: NULL terminated channel ID names
+ * @ul_xfer_cb: UL data transfer callback
+ * @dl_xfer_cb: DL data transfer callback
+ * @status_cb: Asynchronous status callback
+ */
+struct mhi_driver {
+	const struct mhi_device_id *id_table;
+	int (*probe)(struct mhi_device *, const struct mhi_device_id *id);
+	void (*remove)(struct mhi_device *);
+	void (*ul_xfer_cb)(struct mhi_device *, struct mhi_result *);
+	void (*dl_xfer_cb)(struct mhi_device *, struct mhi_result *);
+	void (*status_cb)(struct mhi_device *, enum MHI_CB mhi_cb);
+	struct device_driver driver;
+};
+
+#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
+#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
+
+static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev,
+					  void *priv)
+{
+	mhi_dev->priv_data = priv;
+}
+
+static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev)
+{
+	return mhi_dev->priv_data;
+}
+
+/**
+ * mhi_queue_transfer - Queue a buffer to hardware
+ * All transfers are asynchronous
+ * @mhi_dev: Device associated with the channels
+ * @dir: Data direction
+ * @buf: Data buffer (skb for hardware channels)
+ * @len: Size in bytes
+ * @mflags: Transfer flags, as defined by enum MHI_FLAGS
+ */
+static inline int mhi_queue_transfer(struct mhi_device *mhi_dev,
+				     enum dma_data_direction dir,
+				     void *buf,
+				     size_t len,
+				     enum MHI_FLAGS mflags)
+{
+	if (dir == DMA_TO_DEVICE)
+		return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len,
+					mflags);
+	else
+		return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len,
+					mflags);
+}
+
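+/*
+ * Example (illustrative only, not from this patch): queue one outbound
+ * buffer from a client driver; buf and len are placeholders.
+ *
+ *	ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
+ *	if (ret)
+ *		dev_err(&mhi_dev->dev, "failed to queue buffer\n");
+ */
+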
+static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl)
+{
+	return mhi_cntrl->priv_data;
+}
+
+static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl)
+{
+	kfree(mhi_cntrl);
+}
+
+/**
+ * mhi_driver_register - Register driver with MHI framework
+ * @mhi_drv: mhi_driver structure
+ */
+int mhi_driver_register(struct mhi_driver *mhi_drv);
+
+/**
+ * mhi_driver_unregister - Unregister a driver for mhi_devices
+ * @mhi_drv: mhi_driver structure
+ */
+void mhi_driver_unregister(struct mhi_driver *mhi_drv);
+
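+/*
+ * Minimal client registration sketch (illustrative; the my_* names and
+ * the "DIAG" channel are assumptions, not part of this header):
+ *
+ *	static const struct mhi_device_id my_mhi_match[] = {
+ *		{ .chan = "DIAG" },
+ *		{},
+ *	};
+ *
+ *	static struct mhi_driver my_mhi_driver = {
+ *		.id_table = my_mhi_match,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *		.ul_xfer_cb = my_ul_cb,
+ *		.dl_xfer_cb = my_dl_cb,
+ *		.driver = { .name = "my_mhi_client" },
+ *	};
+ *
+ *	module_driver(my_mhi_driver, mhi_driver_register,
+ *		      mhi_driver_unregister);
+ */
+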
+/**
+ * mhi_device_configure - configure ECA or CCA context
+ * For offload channels that the client manages, call this
+ * function to configure the channel context or event context
+ * array associated with the channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: Direction of the channel
+ * @mhi_buf: Configuration data
+ * @elements: # of configuration elements
+ */
+int mhi_device_configure(struct mhi_device *mhi_dev,
+			 enum dma_data_direction dir,
+			 struct mhi_buf *mhi_buf,
+			 int elements);
+
+/**
+ * mhi_device_get - disable low power modes
+ * Only disables lpm; does not immediately exit low power mode
+ * if the controller is already in one
+ * @mhi_dev: Device associated with the channels
+ * @vote: requested vote (bus, device or both)
+ */
+void mhi_device_get(struct mhi_device *mhi_dev, int vote);
+
+/**
+ * mhi_device_get_sync - disable low power modes
+ * Synchronously disable device and/or bus low power modes, and exit low
+ * power mode if the controller is already in a low power state
+ * @mhi_dev: Device associated with the channels
+ * @vote: requested vote (bus, device or both)
+ */
+int mhi_device_get_sync(struct mhi_device *mhi_dev, int vote);
+
+/**
+ * mhi_device_put - re-enable low power modes
+ * @mhi_dev: Device associated with the channels
+ * @vote: vote to remove
+ */
+void mhi_device_put(struct mhi_device *mhi_dev, int vote);
+
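+/*
+ * Vote usage sketch (illustrative): keep both the device and the bus
+ * active around a critical section, then drop the votes:
+ *
+ *	mhi_device_get_sync(mhi_dev, MHI_VOTE_DEVICE | MHI_VOTE_BUS);
+ *	... access the device ...
+ *	mhi_device_put(mhi_dev, MHI_VOTE_DEVICE | MHI_VOTE_BUS);
+ */
+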
+/**
+ * mhi_prepare_for_transfer - setup channel for data transfer
+ * Moves both UL and DL channel from RESET to START state
+ * @mhi_dev: Device associated with the channels
+ */
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_unprepare_from_transfer - unprepare the channels
+ * Moves both UL and DL channels to RESET state
+ * @mhi_dev: Device associated with the channels
+ */
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_get_no_free_descriptors - Get transfer ring length
+ * Get # of TD available to queue buffers
+ * @mhi_dev: Device associated with the channels
+ * @dir: Direction of the channel
+ */
+int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
+				enum dma_data_direction dir);
+
+/**
+ * mhi_poll - poll for any available data to consume
+ * This is only applicable for DL direction
+ * @mhi_dev: Device associated with the channels
+ * @budget: Max # of descriptors to service before returning
+ */
+int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
+
+/**
+ * mhi_ioctl - user space IOCTL support for MHI channels
+ * Native support for setting TIOCM
+ * @mhi_dev: Device associated with the channels
+ * @cmd: IOCTL cmd
+ * @arg: Optional parameter, ioctl cmd specific
+ */
+long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg);
+
+/**
+ * mhi_alloc_controller - Allocate mhi_controller structure
+ * Allocate controller structure and additional data for controller
+ * private data. You may get the private data pointer by calling
+ * mhi_controller_get_devdata
+ * @size: # of additional bytes to allocate
+ */
+struct mhi_controller *mhi_alloc_controller(size_t size);
+
+/**
+ * of_register_mhi_controller - Register MHI controller
+ * Registers MHI controller with MHI bus framework. DT configuration is
+ * required
+ * @mhi_cntrl: MHI controller to register
+ */
+int of_register_mhi_controller(struct mhi_controller *mhi_cntrl);
+
+void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_bdf_to_controller - Look up a registered controller
+ * Search for controller based on device identification
+ * @domain: RC domain of the device
+ * @bus: Bus device connected to
+ * @slot: Slot device assigned to
+ * @dev_id: Device Identification
+ */
+struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot,
+					     u32 dev_id);
+
+/**
+ * mhi_prepare_for_power_up - Do pre-initialization before power up
+ * This is optional; call this before power up if the controller does not
+ * want the bus framework to automatically free any allocated memory during
+ * the shutdown process.
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_async_power_up - Starts MHI power up sequence
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
+
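+/*
+ * Controller bring-up sketch (illustrative; error handling elided and
+ * field/callback setup abbreviated):
+ *
+ *	mhi_cntrl = mhi_alloc_controller(sizeof(*my_priv));
+ *	my_priv = mhi_controller_get_devdata(mhi_cntrl);
+ *	... fill in dev, of_node, regs, irq table and callbacks ...
+ *	ret = of_register_mhi_controller(mhi_cntrl);
+ *	if (!ret)
+ *		ret = mhi_sync_power_up(mhi_cntrl);
+ */
+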
+/**
+ * mhi_power_down - Start MHI power down sequence
+ * @mhi_cntrl: MHI controller
+ * @graceful: if true, the link is still accessible, so do a graceful shutdown;
+ * otherwise shut down the host w/o putting the device into RESET state
+ */
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
+
+/**
+ * mhi_unprepare_after_power_down - free any allocated memory for power up
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_suspend - Move MHI into a suspended state
+ * Transition to MHI M3 state from M0/M1/M2 state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_fast_suspend - Move host into suspend state while keeping
+ * the device in active state.
+ * @mhi_cntrl: MHI controller
+ * @notify_client: if true, clients will get a notification about lpm transition
+ */
+int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client);
+
+/**
+ * mhi_pm_resume - Resume MHI from suspended state
+ * Transition to MHI M0 state from M3 state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_fast_resume - Move host into resume state from fast suspend state
+ * @mhi_cntrl: MHI controller
+ * @notify_client: if true, clients will get a notification about lpm transition
+ */
+int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client);
+
+/**
+ * mhi_download_rddm_img - Download ramdump image from device for
+ * debugging purpose.
+ * @mhi_cntrl: MHI controller
+ * @in_panic: True if we are trying to capture the image during a kernel panic
+ */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
+
+/**
+ * mhi_force_rddm_mode - Force external device into rddm mode
+ * to collect device ramdump. This is useful if the host driver asserts
+ * and we need to see the device state as well.
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_get_remote_time_sync - Get external soc time relative to local soc time
+ * using MMIO method.
+ * @mhi_dev: Device associated with the channels
+ * @t_host: Pointer to output local soc time
+ * @t_dev: Pointer to output remote soc time
+ */
+int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
+			     u64 *t_host,
+			     u64 *t_dev);
+
+/**
+ * mhi_get_mhi_state - Return MHI state of device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_set_mhi_state - Set device state
+ * @mhi_cntrl: MHI controller
+ * @state: state to set
+ */
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
+		       enum mhi_dev_state state);
+
+
+/**
+ * mhi_is_active - helper function to determine if MHI is in active state
+ * @mhi_dev: client device
+ */
+static inline bool mhi_is_active(struct mhi_device *mhi_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
+		mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
+}
+
+/**
+ * mhi_control_error - MHI controller went into unrecoverable error state.
+ * Will transition MHI into Linkdown state. Do not call from atomic
+ * context.
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_control_error(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_debug_reg_dump - dump MHI registers for debug purpose
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);
+
+#ifndef CONFIG_ARCH_QCOM
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define MHI_VERB(fmt, ...) do { \
+		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
+			pr_debug("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
+} while (0)
+
+#else
+
+#define MHI_VERB(fmt, ...)
+
+#endif
+
+#define MHI_LOG(fmt, ...) do {	\
+		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
+			pr_info("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
+} while (0)
+
+#define MHI_ERR(fmt, ...) do {	\
+		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
+			pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define MHI_CRITICAL(fmt, ...) do { \
+		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
+			pr_alert("[C][%s] " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#else /* ARCH QCOM */
+
+#include <linux/ipc_logging.h>
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define MHI_VERB(fmt, ...) do { \
+		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
+			pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
+		if (mhi_cntrl->log_buf && \
+		    (mhi_cntrl->log_lvl <= MHI_MSG_LVL_VERBOSE)) \
+			ipc_log_string(mhi_cntrl->log_buf, "[D][%s] " fmt, \
+				       __func__, ##__VA_ARGS__); \
+} while (0)
+
+#else
+
+#define MHI_VERB(fmt, ...)
+
+#endif
+
+#define MHI_LOG(fmt, ...) do {	\
+		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
+			pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
+		if (mhi_cntrl->log_buf && \
+		    (mhi_cntrl->log_lvl <= MHI_MSG_LVL_INFO)) \
+			ipc_log_string(mhi_cntrl->log_buf, "[I][%s] " fmt, \
+				       __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define MHI_ERR(fmt, ...) do {	\
+		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
+			pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
+		if (mhi_cntrl->log_buf && \
+		    (mhi_cntrl->log_lvl <= MHI_MSG_LVL_ERROR)) \
+			ipc_log_string(mhi_cntrl->log_buf, "[E][%s] " fmt, \
+				       __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define MHI_CRITICAL(fmt, ...) do { \
+		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
+			pr_err("[C][%s] " fmt, __func__, ##__VA_ARGS__); \
+		if (mhi_cntrl->log_buf && \
+		    (mhi_cntrl->log_lvl <= MHI_MSG_LVL_CRITICAL)) \
+			ipc_log_string(mhi_cntrl->log_buf, "[C][%s] " fmt, \
+				       __func__, ##__VA_ARGS__); \
+} while (0)
+
+#endif
+
+#endif /* _MHI_H_ */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 579fd0a..85a43db 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -690,5 +690,17 @@
 	const char obj_type[16];
 };
 
+#define MHI_NAME_SIZE 32
+
+/**
+ * struct mhi_device_id - MHI device identification
+ * @chan: MHI channel name
+ * @driver_data: driver data
+ */
+
+struct mhi_device_id {
+	const char chan[MHI_NAME_SIZE];
+	kernel_ulong_t driver_data;
+};
 
 #endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f656728..dc9c526 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -67,6 +67,12 @@
 /* Indicate backport support for processing user cell base hint */
 #define CFG80211_USER_HINT_CELL_BASE_SELF_MANAGED 1
 
+/* Indicate backport support for external authentication in AP mode */
+#define CFG80211_EXTERNAL_AUTH_AP_SUPPORT 1
+
+/* Indicate backport support for DH IE creation/update */
+#define CFG80211_EXTERNAL_DH_UPDATE_SUPPORT 1
+
 /**
  * DOC: Introduction
  *
@@ -739,6 +745,17 @@
 };
 
 /**
+ * enum cfg80211_ap_settings_flags - AP settings flags
+ *
+ * Used by cfg80211_ap_settings
+ *
+ * @AP_SETTINGS_EXTERNAL_AUTH_SUPPORT: AP supports external authentication
+ */
+enum cfg80211_ap_settings_flags {
+	AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = BIT(0),
+};
+
+/**
  * struct cfg80211_ap_settings - AP configuration
  *
  * Used to configure an AP interface.
@@ -763,6 +780,7 @@
  * @pbss: If set, start as a PCP instead of AP. Relevant for DMG
  *	networks.
  * @beacon_rate: bitrate to be used for beacons
+ * @flags: flags, as defined in enum cfg80211_ap_settings_flags
  */
 struct cfg80211_ap_settings {
 	struct cfg80211_chan_def chandef;
@@ -783,6 +801,7 @@
 	const struct cfg80211_acl_data *acl;
 	bool pbss;
 	struct cfg80211_bitrate_mask beacon_rate;
+	u32 flags;
 };
 
 /**
@@ -2582,6 +2601,7 @@
  *	use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space cannot give you
  *	the real status code for failures. Used only for the authentication
  *	response command interface (user space to driver).
+ * @pmkid: The identifier that refers to a PMKSA.
  */
 struct cfg80211_external_auth_params {
 	enum nl80211_external_auth_action action;
@@ -2589,6 +2609,33 @@
 	struct cfg80211_ssid ssid;
 	unsigned int key_mgmt_suite;
 	u16 status;
+	const u8 *pmkid;
+};
+
+/**
+ * struct cfg80211_update_owe_info - OWE Information
+ *
+ * This structure provides information needed for the drivers to offload OWE
+ * (Opportunistic Wireless Encryption) processing to the user space.
+ *
+ * Commonly used across update_owe_info request and event interfaces.
+ *
+ * @peer: MAC address of the peer device for which the OWE processing
+ *	has to be done.
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful OWE info
+ *	processing, use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space
+ *	cannot give you the real status code for failures. Used only for
+ *	OWE update request command interface (user space to driver).
+ * @ie: IEs obtained from the peer or constructed by the user space. These are
+ *	the IEs of the remote peer in the event from the host driver and
+ *	the constructed IEs by the user space in the request interface.
+ * @ie_len: Length of IEs in octets.
+ */
+struct cfg80211_update_owe_info {
+	u8 peer[ETH_ALEN] __aligned(2);
+	u16 status;
+	const u8 *ie;
+	size_t ie_len;
 };
 
 /**
@@ -2903,6 +2950,10 @@
  *
  * @external_auth: indicates result of offloaded authentication processing from
  *     user space
+ *
+ * @update_owe_info: Provide updated OWE info to driver. Driver implementing SME
+ *	but offloading OWE processing to the user space will get the updated
+ *	DH IE through this interface.
  */
 struct cfg80211_ops {
 	int	(*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3189,6 +3240,8 @@
 					    const bool enabled);
 	int     (*external_auth)(struct wiphy *wiphy, struct net_device *dev,
 				 struct cfg80211_external_auth_params *params);
+	int	(*update_owe_info)(struct wiphy *wiphy, struct net_device *dev,
+				   struct cfg80211_update_owe_info *owe_info);
 };
 
 /*
@@ -6185,4 +6238,14 @@
 #define wiphy_WARN(wiphy, format, args...)			\
 	WARN(1, "wiphy: %s\n" format, wiphy_name(wiphy), ##args);
 
+/**
+ * cfg80211_update_owe_info_event - Notify the peer's OWE info to user space
+ * @netdev: network device
+ * @owe_info: peer's owe info
+ * @gfp: allocation flags
+ */
+void cfg80211_update_owe_info_event(struct net_device *netdev,
+				    struct cfg80211_update_owe_info *owe_info,
+				    gfp_t gfp);
+
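+/*
+ * Illustrative driver-side flow (not part of this patch): on receiving
+ * an (Re)Association Request carrying a DH IE from a peer, hand the IE
+ * to user space for OWE processing; dh_ie, dh_ie_len and peer_mac are
+ * placeholders:
+ *
+ *	struct cfg80211_update_owe_info info = {
+ *		.ie = dh_ie,
+ *		.ie_len = dh_ie_len,
+ *	};
+ *	memcpy(info.peer, peer_mac, ETH_ALEN);
+ *	cfg80211_update_owe_info_event(netdev, &info, GFP_KERNEL);
+ */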
 #endif /* __NET_CFG80211_H */
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 360a8bc..864943f 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -110,6 +110,7 @@
 #define IPA_IOCTL_GSB_DISCONNECT                62
 #define IPA_IOCTL_GET_PHERIPHERAL_EP_INFO       63
 #define IPA_IOCTL_GET_NAT_IN_SRAM_INFO          64
+#define IPA_IOCTL_APP_CLOCK_VOTE                65
 
 /**
  * max size of the header to be inserted
@@ -2309,6 +2310,10 @@
 				IPA_IOCTL_GET_NAT_IN_SRAM_INFO, \
 				struct ipa_nat_in_sram_info)
 
+#define IPA_IOC_APP_CLOCK_VOTE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_APP_CLOCK_VOTE, \
+				uint32_t)
+
 /*
  * unique magic number of the Tethering bridge ioctls
  */
@@ -2414,6 +2419,18 @@
 	uint32_t best_nat_in_sram_size_rqst;
 };
 
+/**
+ * enum ipa_app_clock_vote_type
+ *
+ * The types of votes that can be accepted by the
+ * IPA_IOC_APP_CLOCK_VOTE ioctl
+ */
+enum ipa_app_clock_vote_type {
+	IPA_APP_CLK_DEVOTE     = 0,
+	IPA_APP_CLK_VOTE       = 1,
+	IPA_APP_CLK_RESET_VOTE = 2,
+};
+
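+/*
+ * Illustrative user-space call (assumption: the driver consumes the
+ * vote type directly as the ioctl argument):
+ *
+ *	ioctl(fd, IPA_IOC_APP_CLOCK_VOTE, IPA_APP_CLK_VOTE);
+ */
+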
 #define TETH_BRIDGE_IOC_SET_BRIDGE_MODE _IOW(TETH_BRIDGE_IOC_MAGIC, \
 				TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE, \
 				struct teth_ioc_set_bridge_mode *)
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 39f920c..31817b8 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1018,6 +1018,11 @@
  *	indicated by %NL80211_ATTR_WIPHY_FREQ and other attributes
  *	determining the width and type.
  *
+ * @NL80211_CMD_UPDATE_OWE_INFO: This interface allows the host driver to
+ *	offload OWE processing to user space. This is intended to support
+ *	the OWE AKM for host drivers that implement SME but rely
+ *	on user space for the cryptographic/DH IE processing in AP mode.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -1238,6 +1243,8 @@
 
 	NL80211_CMD_NOTIFY_RADAR,
 
+	NL80211_CMD_UPDATE_OWE_INFO,
+
 	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
@@ -2179,10 +2186,10 @@
  *     &enum nl80211_external_auth_action value). This is used with the
  *     &NL80211_CMD_EXTERNAL_AUTH request event.
  * @NL80211_ATTR_EXTERNAL_AUTH_SUPPORT: Flag attribute indicating that the user
- *     space supports external authentication. This attribute shall be used
- *     only with %NL80211_CMD_CONNECT request. The driver may offload
- *     authentication processing to user space if this capability is indicated
- *     in NL80211_CMD_CONNECT requests from the user space.
+ *	space supports external authentication. This attribute shall be used
+ *	with %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP request. The driver
+ *	may offload authentication processing to user space if this capability
+ *	is indicated in the respective requests from the user space.
  *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -5264,9 +5271,14 @@
  * Used by cfg80211_rx_mgmt()
  *
  * @NL80211_RXMGMT_FLAG_ANSWERED: frame was answered by device/driver.
+ * @NL80211_RXMGMT_FLAG_EXTERNAL_AUTH: Host driver intends to offload
+ *	the authentication. Exclusively defined for host drivers that
+ *	advertise the SME functionality but would like the userspace
+ *	to handle certain authentication algorithms (e.g. SAE).
  */
 enum nl80211_rxmgmt_flags {
 	NL80211_RXMGMT_FLAG_ANSWERED = 1 << 0,
+	NL80211_RXMGMT_FLAG_EXTERNAL_AUTH = 1 << 1,
 };
 
 /*
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 80f0679..3e9b136 100755
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4027,6 +4027,9 @@
 			return PTR_ERR(params.acl);
 	}
 
+	if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
+		params.flags |= AP_SETTINGS_EXTERNAL_AUTH_SUPPORT;
+
 	wdev_lock(wdev);
 	err = rdev_start_ap(rdev, dev, &params);
 	if (!err) {
@@ -12096,7 +12099,9 @@
 	if (!rdev->ops->external_auth)
 		return -EOPNOTSUPP;
 
-	if (!info->attrs[NL80211_ATTR_SSID])
+	if (!info->attrs[NL80211_ATTR_SSID] &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
 		return -EINVAL;
 
 	if (!info->attrs[NL80211_ATTR_BSSID])
@@ -12107,21 +12112,52 @@
 
 	memset(&params, 0, sizeof(params));
 
-	params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
-	if (params.ssid.ssid_len == 0 ||
-	    params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN)
-		return -EINVAL;
-	memcpy(params.ssid.ssid, nla_data(info->attrs[NL80211_ATTR_SSID]),
-	       params.ssid.ssid_len);
+	if (info->attrs[NL80211_ATTR_SSID]) {
+		params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+		if (params.ssid.ssid_len == 0 ||
+		    params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN)
+			return -EINVAL;
+		memcpy(params.ssid.ssid,
+		       nla_data(info->attrs[NL80211_ATTR_SSID]),
+		       params.ssid.ssid_len);
+	}
 
 	memcpy(params.bssid, nla_data(info->attrs[NL80211_ATTR_BSSID]),
 	       ETH_ALEN);
 
 	params.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
 
+	if (info->attrs[NL80211_ATTR_PMKID])
+		params.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
+
 	return rdev_external_auth(rdev, dev, &params);
 }
 
+static int nl80211_update_owe_info(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct cfg80211_update_owe_info owe_info;
+	struct net_device *dev = info->user_ptr[1];
+
+	if (!rdev->ops->update_owe_info)
+		return -EOPNOTSUPP;
+
+	if (!info->attrs[NL80211_ATTR_STATUS_CODE] ||
+	    !info->attrs[NL80211_ATTR_MAC])
+		return -EINVAL;
+
+	memset(&owe_info, 0, sizeof(owe_info));
+	owe_info.status = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
+	nla_memcpy(owe_info.peer, info->attrs[NL80211_ATTR_MAC], ETH_ALEN);
+
+	if (info->attrs[NL80211_ATTR_IE]) {
+		owe_info.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+		owe_info.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+	}
+
+	return rdev_update_owe_info(rdev, dev, &owe_info);
+}
+
 #define NL80211_FLAG_NEED_WIPHY		0x01
 #define NL80211_FLAG_NEED_NETDEV	0x02
 #define NL80211_FLAG_NEED_RTNL		0x04
@@ -13019,6 +13055,13 @@
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL80211_CMD_UPDATE_OWE_INFO,
+		.doit = nl80211_update_owe_info,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };
 
 /* notification functions */
@@ -15059,6 +15102,46 @@
 }
 EXPORT_SYMBOL(cfg80211_external_auth_request);
 
+void cfg80211_update_owe_info_event(struct net_device *netdev,
+				    struct cfg80211_update_owe_info *owe_info,
+				    gfp_t gfp)
+{
+	struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy;
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+	struct sk_buff *msg;
+	void *hdr;
+
+	trace_cfg80211_update_owe_info_event(wiphy, netdev, owe_info);
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_UPDATE_OWE_INFO);
+	if (!hdr)
+		goto nla_put_failure;
+
+	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, owe_info->peer))
+		goto nla_put_failure;
+
+	if (!owe_info->ie_len ||
+	    nla_put(msg, NL80211_ATTR_IE, owe_info->ie_len, owe_info->ie))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+
+	genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
+				NL80211_MCGRP_MLME, gfp);
+	return;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_update_owe_info_event);
+
 /* initialisation/exit functions */
 
 int nl80211_init(void)
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 091806d..a0ff6b1 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1168,4 +1168,17 @@
 	return ret;
 }
 
+static inline int rdev_update_owe_info(struct cfg80211_registered_device *rdev,
+				       struct net_device *dev,
+				       struct cfg80211_update_owe_info *oweinfo)
+{
+	int ret = -EOPNOTSUPP;
+
+	trace_rdev_update_owe_info(&rdev->wiphy, dev, oweinfo);
+	if (rdev->ops->update_owe_info)
+		ret = rdev->ops->update_owe_info(&rdev->wiphy, dev, oweinfo);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
 #endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 80ea75a..ef56df5 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -3090,6 +3090,44 @@
 		  WIPHY_PR_ARG, NETDEV_PR_ARG,
 		  BOOL_TO_STR(__entry->enabled))
 );
+
+TRACE_EVENT(rdev_update_owe_info,
+	    TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		     struct cfg80211_update_owe_info *owe_info),
+	    TP_ARGS(wiphy, netdev, owe_info),
+	    TP_STRUCT__entry(WIPHY_ENTRY
+			     NETDEV_ENTRY
+			     MAC_ENTRY(peer)
+			     __field(u16, status)
+			     __dynamic_array(u8, ie, owe_info->ie_len)),
+	    TP_fast_assign(WIPHY_ASSIGN;
+			   NETDEV_ASSIGN;
+			   MAC_ASSIGN(peer, owe_info->peer);
+			   __entry->status = owe_info->status;
+			   memcpy(__get_dynamic_array(ie),
+				  owe_info->ie, owe_info->ie_len);),
+	    TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT
+		  " status %d", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
+		  __entry->status)
+);
+
+TRACE_EVENT(cfg80211_update_owe_info_event,
+	    TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		     struct cfg80211_update_owe_info *owe_info),
+	    TP_ARGS(wiphy, netdev, owe_info),
+	    TP_STRUCT__entry(WIPHY_ENTRY
+			     NETDEV_ENTRY
+			     MAC_ENTRY(peer)
+			     __dynamic_array(u8, ie, owe_info->ie_len)),
+	    TP_fast_assign(WIPHY_ASSIGN;
+			   NETDEV_ASSIGN;
+			   MAC_ASSIGN(peer, owe_info->peer);
+			   memcpy(__get_dynamic_array(ie), owe_info->ie,
+				  owe_info->ie_len);),
+	    TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: " MAC_PR_FMT,
+		      WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
+);
+
 #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
 
 #undef TRACE_INCLUDE_PATH