Merge "ARM: dts: msm: Enable TZ-log driver for Kona"
diff --git a/Documentation/devicetree/bindings/display/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/display/msm/mdss-dsi-panel.txt
index 43c9d0b..1f825e7 100644
--- a/Documentation/devicetree/bindings/display/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/display/msm/mdss-dsi-panel.txt
@@ -173,7 +173,15 @@
 					"bl_ctrl_pwm" = Backlight controlled by PWM gpio.
 					"bl_ctrl_wled" = Backlight controlled by WLED.
 					"bl_ctrl_dcs" = Backlight controlled by DCS commands.
+					"bl_ctrl_external" = Backlight controlled by externally
 					other: Unknown backlight control. (default)
+- qcom,mdss-dsi-sec-bl-pmic-control-type: A string that specifies the implementation of backlight
+					  control for the secondary panel.
+					  "bl_ctrl_pwm" = Backlight controlled by PWM gpio.
+					  "bl_ctrl_wled" = Backlight controlled by WLED.
+					  "bl_ctrl_dcs" = Backlight controlled by DCS commands.
+					  "bl_ctrl_external" = Backlight controlled externally.
+					  other: Unknown backlight control. (default)
 - qcom,mdss-dsi-bl-pwm-pmi:		Boolean to indicate that PWM control is through second pmic chip.
 - qcom,mdss-dsi-bl-pmic-bank-select:	LPG channel for backlight.
 					Required if backlight pmic control type is PWM
@@ -187,6 +195,9 @@
 					255 = default value.
 - qcom,mdss-brightness-max-level:	Specifies the max brightness level supported.
 					255 = default value.
+- qcom,bl-update-flag:			A string that specifies how backlight updates of the panel are controlled.
+					"delay_until_first_frame" = Delay the backlight update of the panel
+					until the first frame is received from the HW.
 - qcom,mdss-dsi-interleave-mode:	Specifies interleave mode.
 					0 = default value.
 - qcom,mdss-dsi-panel-type:		Specifies the panel operating mode.
@@ -529,6 +540,8 @@
 					display. The first set is indexed by the
 					value 0.
 - qcom,mdss-dsi-ext-bridge-mode:	External bridge chip is connected instead of panel.
+- qcom,mdss-dsi-dma-schedule-line:	An integer value that indicates the line number, after the vertical
+					active region, at which command DMA needs to be triggered.
 
 Required properties for sub-nodes:	None
 Optional properties:
@@ -604,6 +617,7 @@
 		qcom,mdss-dsi-bl-min-level = <1>;
 		qcom,mdss-dsi-bl-max-level = < 15>;
 		qcom,mdss-brightness-max-level = <255>;
+		qcom,bl-update-flag = "delay_until_first_frame";
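+		/* illustrative only: backlight control type for a secondary panel */
+		qcom,mdss-dsi-sec-bl-pmic-control-type = "bl_ctrl_dcs";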
 		qcom,mdss-dsi-interleave-mode = <0>;
 		qcom,mdss-dsi-panel-type = "dsi_video_mode";
 		qcom,mdss-dsi-te-check-enable;
@@ -634,8 +648,6 @@
 		qcom,mdss-dsi-mdp-trigger = <0>;
 		qcom,mdss-dsi-dma-trigger = <0>;
 		qcom,mdss-dsi-panel-framerate = <60>;
-		qcom,mdss-dsi-panel-clockrate = <424000000>;
-		qcom,mdss-mdp-transfer-time-us = <12500>;
 		qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33
 					22 27 1e 03 04 00];
                 qcom,mdss-dsi-panel-timings-8996 = [23 20 06 09 05 03 04 a0
@@ -713,6 +725,8 @@
 				qcom,mdss-dsi-v-front-porch = <728>;
 				qcom,mdss-dsi-v-pulse-width = <4>;
 				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-clockrate = <424000000>;
+				qcom,mdss-mdp-transfer-time-us = <12500>;
 				qcom,mdss-dsi-panel-timings = [E6 38 26 00 68 6E 2A 3C 2C 03 04 00];
 				qcom,mdss-dsi-t-clk-post = <0x02>;
 				qcom,mdss-dsi-t-clk-pre = <0x2a>;
@@ -786,5 +800,6 @@
 		qcom,display-topology = <1 1 1>,
 			                <2 2 1>;
 		qcom,default-topology-index = <0>;
+		qcom,mdss-dsi-dma-schedule-line = <5>;
 	};
 };
diff --git a/Documentation/devicetree/bindings/display/msm/mdss-pll.txt b/Documentation/devicetree/bindings/display/msm/mdss-pll.txt
index d1e7929..c3f0ca7 100644
--- a/Documentation/devicetree/bindings/display/msm/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/display/msm/mdss-pll.txt
@@ -19,7 +19,8 @@
                         "qcom,mdss_hdmi_pll_8998", "qcom,mdss_dp_pll_10nm",
                         "qcom,mdss_dsi_pll_7nm",   "qcom,mdss_dp_pll_7nm",
 			"qcom,mdss_dsi_pll_28lpm", "qcom,mdss_dsi_pll_14nm",
-			"qcom,mdss_dp_pll_14nm"
+			"qcom,mdss_dp_pll_14nm", "qcom,mdss_dsi_pll_7nm_v2",
+			"qcom,mdss_hdmi_pll_28lpm"
 - cell-index:		Specifies the controller used
 - reg:			offset and length of the register set for the device.
 - reg-names :		names to refer to register sets related to this device
diff --git a/Documentation/devicetree/bindings/display/msm/sde-dp.txt b/Documentation/devicetree/bindings/display/msm/sde-dp.txt
index c5494eb..a17b738 100644
--- a/Documentation/devicetree/bindings/display/msm/sde-dp.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde-dp.txt
@@ -67,8 +67,15 @@
 					within DP AUX, while the remaining entries indicate the
 					programmable values.
 - qcom,max-pclk-frequency-khz:	An integer specifying the max. pixel clock in KHz supported by Display Port.
+- qcom,mst-enable:		Boolean property to enable the MST feature.
+- qcom,dsc-feature-enable:	Boolean property to enable the DSC feature.
+- qcom,fec-feature-enable:	Boolean property to enable the FEC feature.
+- qcom,max-dp-dsc-blks:		An integer specifying the max. DSC blocks available for Display Port.
+- qcom,max-dp-dsc-input-width-pixs: An integer specifying the max. input width in pixels for each DSC block.
 - qcom,dp-usbpd-detection:	Phandle for the PMI regulator node for USB PHY PD detection.
 - qcom,dp-aux-switch:		Phandle for the driver used to program the AUX switch for Display Port orientation.
+- qcom,dp-hpd-gpio:		HPD gpio for a direct DP connector without USB PHY or AUX switch.
+- qcom,dp-gpio-aux-switch:	Boolean property to indicate that a GPIO-based DP AUX switch chipset is used.
 - qcom,<type>-supply-entries:		A node that lists the elements of the supply used by the a particular "type" of DSI module. The module "types"
 					can be "core", "ctrl", and "phy". Within the same type,
 					there can be more than one instance of this binding,
@@ -84,11 +91,6 @@
 					-- qcom,supply-post-on-sleep: time to sleep (ms) after turning on
 					-- qcom,supply-pre-off-sleep: time to sleep (ms) before turning off
 					-- qcom,supply-post-off-sleep: time to sleep (ms) after turning off
-- pinctrl-names:	List of names to assign mdss pin states defined in pinctrl device node
-					Refer to pinctrl-bindings.txt
-- pinctrl-<0..n>:	Lists phandles each pointing to the pin configuration node within a pin
-					controller. These pin configurations are installed in the pinctrl
-					device node. Refer to pinctrl-bindings.txt
 
 msm_ext_disp is a device which manages the interaction between external
 display interfaces, e.g. Display Port, and the audio subsystem.
@@ -96,7 +98,13 @@
 Optional properties:
 - qcom,ext-disp:		phandle for msm-ext-display module
 - compatible:			Must be "qcom,msm-ext-disp"
+- qcom,dp-low-power-hw-hpd:	Boolean property to enable the low power hardware HPD feature.
 - qcom,phy-version:		Phy version
+- pinctrl-names:		List of names to assign mdss pin states defined in pinctrl device node
+				Refer to pinctrl-bindings.txt
+- pinctrl-<0..n>:		Lists phandles each pointing to the pin configuration node within a pin
+				controller. These pin configurations are installed in the pinctrl
+				device node. Refer to pinctrl-bindings.txt
 
 [Optional child nodes]: These nodes are for devices which are
 dependent on msm_ext_disp. If msm_ext_disp is disabled then
@@ -170,6 +178,11 @@
 		qcom,aux-cfg8-settings = [3c bb];
 		qcom,aux-cfg9-settings = [40 03];
 		qcom,max-pclk-frequency-khz = <593470>;
+		qcom,mst-enable;
+		qcom,dsc-feature-enable;
+		qcom,fec-feature-enable;
+		qcom,max-dp-dsc-blks = <2>;
+		qcom,max-dp-dsc-input-width-pixs = <2048>;
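+		/* illustrative only: direct HPD gpio; pin number is a placeholder */
+		qcom,dp-hpd-gpio = <&tlmm 77 0>;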
 		pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
 		pinctrl-0 = <&sde_dp_aux_active &sde_dp_usbplug_cc_active>;
 		pinctrl-1 = <&sde_dp_aux_suspend &sde_dp_usbplug_cc_suspend>;
diff --git a/Documentation/devicetree/bindings/display/msm/sde-dsi.txt b/Documentation/devicetree/bindings/display/msm/sde-dsi.txt
index 2836f9f..2fe7775 100644
--- a/Documentation/devicetree/bindings/display/msm/sde-dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde-dsi.txt
@@ -61,11 +61,13 @@
 Optional properties:
 - label:                  String to describe controller.
 - qcom,platform-te-gpio:  Specifies the gpio used for TE.
+- qcom,panel-te-source:  Specifies the source pin for Vsync from panel or WD Timer.
 - qcom,dsi-ctrl: handle to dsi controller device
 - qcom,dsi-phy: handle to dsi phy device
 - qcom,dsi-ctrl-num:		Specifies the DSI controllers to use
 - qcom,dsi-phy-num:		Specifies the DSI PHYs to use
 - qcom,dsi-select-clocks:	Specifies the required clocks to use
+- qcom,dsi-display-list:	Specifies the list of supported displays.
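+				e.g. qcom,dsi-display-list = <&dsi_sim_display>; (phandle is illustrative)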
 - qcom,dsi-manager:       Specifies dsi manager is present
 - qcom,dsi-display:       Specifies dsi display is present
 - qcom,hdmi-display:      Specifies hdmi is present
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 009f83f..99de140 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -63,6 +63,11 @@
 				from register "mdp_phys" defined in reg property.
 - qcom,sde-pp-slave:		Array of flags indicating whether each ping pong
 				block may be configured as a pp slave.
+- qcom,sde-pp-merge-3d-id:	Array of index ID values for the merge 3d block
+				connected to each pingpong, starting at 0.
+- qcom,sde-merge-3d-off:	Array of offset addresses for the available
+				merge 3d blocks. These offsets are calculated
+				from register "mdp_phys" defined in reg property.
 - qcom,sde-intf-off:		Array of offset addresses for the available SDE
 				interface blocks that can drive data to a
 				panel controller. The offsets are calculated
@@ -101,6 +106,7 @@
 - qcom,sde-dsc-size:		A u32 value indicates the address range for each dsc.
 - qcom,sde-cdm-size:		A u32 value indicates the address range for each cdm.
 - qcom,sde-pp-size:		A u32 value indicates the address range for each pingpong.
+- qcom,sde-merge-3d-size:	A u32 value indicates the address range for each merge 3d.
 - qcom,sde-wb-size:		A u32 value indicates the address range for each writeback.
 - qcom,sde-len:			A u32 entry for SDE address range.
 - qcom,sde-intf-max-prefetch-lines:	Array of u32 values for max prefetch lines on
@@ -140,6 +146,8 @@
 				feature is available or not.
 - qcom,sde-has-idle-pc:		Boolean property to indicate if target has idle
 				power collapse feature available or not.
+- qcom,fullsize-va-map:		Boolean property to indicate that the smmu mapping
+				range for mdp should be the full range (4GB).
 - qcom,sde-has-mixer-gc:	Boolean property to indicate if mixer has gamma correction
 				feature available or not.
 - qcom,sde-has-dest-scaler: 	Boolean property to indicate if destination scaler
@@ -433,6 +441,10 @@
 				for the mixer block. Possible values:
 				"primary" - preferred for primary display
 				"none" - no preference on display
+- qcom,sde-mixer-cwb-pref:	A string array indicating the preferred mixer block
+				for CWB. Possible values:
+				"cwb" - preferred for CWB
+				"none" - no preference on display
 - qcom,sde-ctl-display-pref:    A string array indicating the preferred display type
                                 for the ctl block. Possible values:
 				"primary" - preferred for primary display
@@ -532,6 +544,8 @@
 			0x00047000 0x0004a000>;
     qcom,sde-mixer-display-pref = "primary", "none",
 	                "none", "none";
+    qcom,sde-mixer-cwb-pref = "none", "none",
+	                "cwb", "none";
     qcom,sde-dspp-top-off = <0x1300>;
     qcom,sde-dspp-off = <0x00055000 0x00057000>;
     qcom,sde-dspp-ad-off = <0x24000 0x22800>;
@@ -613,6 +627,7 @@
     qcom,sde-highest-bank-bit = <15>;
     qcom,sde-has-mixer-gc;
     qcom,sde-has-idle-pc;
+    qcom,fullsize-va-map;
     qcom,sde-has-dest-scaler;
     qcom,sde-max-dest-scaler-input-linewidth = <2048>;
     qcom,sde-max-dest-scaler-output-linewidth = <2560>;
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
index dfc14f7..1e904ca 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
@@ -10,6 +10,9 @@
 - id-gpio: gpio for USB ID pin. See gpio binding.
 - vbus-gpio: gpio for USB VBUS pin.
 
+Optional properties:
+- vbus-out-gpio: gpio for enabling VBUS output (e.g. when entering host mode)
+
 Example: Examples of extcon-usb-gpio node in dra7-evm.dts as listed below:
 	extcon_usb1 {
 		compatible = "linux,extcon-usb-gpio";
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index f5a0923..81641a6d 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -55,6 +55,7 @@
 - no-sdio: controller is limited to send sdio cmd during initialization
 - no-sd: controller is limited to send sd cmd during initialization
 - no-mmc: controller is limited to send mmc cmd during initialization
+- extcon: phandle to external connector (Refer Documentation/devicetree/bindings/extcon/extcon-gpio.txt for more details).
 - fixed-emmc-driver-type: for non-removable eMMC, enforce this driver type.
   The value <n> is the driver type as specified in the eMMC specification
   (table 206 in spec version 5.1).
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 502b3b8..2dd42d7 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -1,7 +1,6 @@
-* Qualcomm SDHCI controller (sdhci-msm)
+Qualcomm Technologies, Inc. Standard Secure Digital Host Controller (SDHC)
 
-This file documents differences between the core properties in mmc.txt
-and the properties used by the sdhci-msm driver.
+The Secure Digital Host Controller provides a standard host interface to SD/MMC/SDIO cards.
 
 Required properties:
 - compatible: Should contain:
@@ -10,54 +9,177 @@
 		For SDCC version 5.0.0, MCI registers are removed from SDCC
 		interface and some registers are moved to HC. New compatible
 		string is added to support this change - "qcom,sdhci-msm-v5".
-- reg: Base address and length of the register in the following order:
-	- Host controller register map (required)
-	- SD Core register map (required)
-- interrupts: Should contain an interrupt-specifiers for the interrupts:
-	- Host controller interrupt (required)
-- pinctrl-names: Should contain only one value - "default".
-- pinctrl-0: Should specify pin control groups used for this controller.
-- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock-names.
-- clock-names: Should contain the following:
-	"iface" - Main peripheral bus clock (PCLK/HCLK - AHB Bus clock) (required)
-	"core"	- SDC MMC clock (MCLK) (required)
-	"bus"	- SDCC bus voter clock (optional)
-	"xo"	- TCXO clock (optional)
-	"cal"	- reference clock for RCLK delay calibration (optional)
-	"sleep"	- sleep clock for RCLK delay calibration (optional)
+  - reg : should contain the SDHC and SD Core register maps.
+  - reg-names : indicates various resources passed to the driver (via the reg property) by name.
+		Required "reg-names" are "hc_mem" and "core_mem";
+		optional ones are "tlmm_mem".
+  - interrupts : should contain SDHC interrupts.
+  - interrupt-names : indicates interrupts passed to driver (via interrupts property) by name.
+		      Required "interrupt-names" are "hc_irq" and "pwr_irq".
+  - <supply-name>-supply: phandle to the regulator device tree node
+			  Required "supply-name" are "vdd" and "vdd-io".
+
+Required alias:
+- The slot number is specified via an alias with the following format
+	'sdhc{n}' where n is the slot number.
+
+Optional Properties:
+	- interrupt-names - "status_irq". This status_irq will be used for card
+			     detection.
+	- qcom,bus-width - defines the bus I/O width that controller supports.
+			   Units - number of bits. The valid bus-width values are
+			   1, 4 and 8.
+	- qcom,nonremovable - specifies whether the card in the slot is
+			      hot pluggable or hard wired.
+	- qcom,nonhotplug - specifies that the card in the slot is not hot pluggable.
+			    If the card is lost or removed manually at runtime, do not
+			    retry detection until the next reboot.
+	- qcom,bus-speed-mode - specifies supported bus speed modes by host.
+				The supported bus speed modes are :
+				"HS200_1p8v" - indicates that host can support HS200 at 1.8v.
+				"HS200_1p2v" - indicates that host can support HS200 at 1.2v.
+				"DDR_1p8v" - indicates that host can support DDR mode at 1.8v.
+				"DDR_1p2v" - indicates that host can support DDR mode at 1.2v.
+	- qcom,bus-aggr-clk-rates - an array that specifies the frequencies for
+				the bus-aggr-clk, which should be set corresponding to the
+				frequency used from clk-rate. The frequency of this clock
+				should be decided based on the power mode in which the
+				apps clk would run with the frequency in clk-rates.
+	- qcom,devfreq,freq-table - specifies supported frequencies for clock scaling.
+				    Clock scaling logic shall toggle between these frequencies based
+				    on card load. In case the defined frequencies are above or below
+				    the supported card frequencies, they will be overridden
+				    during card init. In case this entry is not supplied,
+				    the driver will construct one based on the card
+				    supported max and min frequencies.
+				    The frequencies must be ordered from lowest to highest.
+	- qcom,pm-qos-irq-type - the PM QoS request type to be used for IRQ voting.
+	  Can be either "affine_cores" or "affine_irq". If not specified, will default
+	  to "affine_cores". Use "affine_irq" setting in case an IRQ balancer is active,
+	  and IRQ affinity changes during runtime.
+	- qcom,pm-qos-irq-cpu - specifies the CPU for which IRQ voting shall be done.
+	  If "affine_cores" was specified for property 'qcom,pm-qos-irq-type'
+	  then this property must be defined, and is not relevant otherwise.
+	- qcom,pm-qos-irq-latency - a tuple defining two latency values with which
+	  PM QoS IRQ voting shall be done. The first value is the latency to be used
+	  when load is high (performance mode) and the second is for low loads
+	  (power saving mode).
+	- qcom,pm-qos-cpu-groups - defines cpu groups mapping.
+	  Each cell represents a group, which is a cpu bitmask defining which cpus belong
+	  to that group.
+	- qcom,pm-qos-<mode>-latency-us - where <mode> is either "cmdq" or "legacy".
+	  An array of latency value tuples, each tuple corresponding to a cpu group in the order
+	  defined in property 'qcom,pm-qos-cpu-groups'. The first value is the latency to be used
+	  when load is high (performance mode) and the second is for low loads
+	  (power saving mode). These values will be used for cpu group voting in
+	  command-queueing or legacy mode, respectively.
+	- qcom,core_3_0v_support: an optional property that is used to fake
+	  3.0V support for SDIO devices.
+	- qcom,scaling-lower-bus-speed-mode:	specifies the lower bus speed mode to be used
+						during clock scaling. If this property is not
+						defined, then it falls back to the default HS
+						bus speed mode to maintain backward compatibility.
+	- qcom,sdr104-wa: On Certain chipsets, SDR104 mode might be unstable causing CRC errors
+			  on the interface. So there is a workaround implemented to skip printing
+			  register dumps on CRC errors and also downgrade bus speed mode to
+			  SDR50/DDR50 in case of continuous CRC errors. Set this flag to enable
+			  this workaround.
+	- qcom,restore-after-cx-collapse - specifies whether the SDCC registers contents need
+	  to be saved and restored by software when the CX Power Collapse feature is enabled.
+	  On certain chipsets, coming out of the CX Power Collapse event, the SDCC register
+	  contents will not be retained. It is the software's responsibility to restore the
+	  SDCC registers before resuming normal operation.
+	- qcom,force-sdhc1-probe: Force probing sdhc1 even if it is not the boot device.
+	- qcom,ddr-config: Certain chipsets and platforms require particular settings for
+			   the RCLK delay DLL configuration register for HS400 mode to work.
+			   This value can vary between platforms and msms. If an msm/platform
+			   requires a different DLL setting than the default/POR setting for
+			   HS400 mode, it can be specified using this field.
+In the following, <supply> can be vdd (flash core voltage) or vdd-io (I/O voltage).
+	- qcom,<supply>-always-on - specifies whether supply should be kept "on" always.
+	- qcom,<supply>-lpm_sup - specifies whether supply can be kept in low power mode (lpm).
+	- qcom,<supply>-voltage_level - specifies voltage levels for supply. Should be
+					specified in pairs (min, max), units uV.
+	- qcom,<supply>-current_level - specifies load levels for supply in lpm or
+					high power mode (hpm). Should be specified in
+					pairs (lpm, hpm), units uA.
+
+	- gpios - specifies gpios assigned for sdhc slot.
+	- qcom,gpio-names -  a list of strings that map in order to the list of gpios
+
+	Tlmm pins are specified as <clk cmd data> and starting with eMMC5.0 as
+	<clk cmd data rclk>
+
+	- Refer to "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+	  for following optional properties:
+		- pinctrl-names
+		- pinctrl-0, pinctrl-1,.. pinctrl-n
+
+	- qcom,large-address-bus - specifies whether the soc is capable of
+				 supporting larger than 32 bit address bus width.
+
+	- qcom,wakeup-on-idle: if configured, the mmcqd thread will call
+	  set_wake_up_idle(), thereby voting for it to be called on idle CPUs.
 
 Example:
 
-	sdhc_1: sdhci@f9824900 {
-		compatible = "qcom,sdhci-msm-v4";
-		reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
-		interrupts = <0 123 0>;
-		bus-width = <8>;
-		non-removable;
-
-		vmmc-supply = <&pm8941_l20>;
-		vqmmc-supply = <&pm8941_s3>;
-
-		pinctrl-names = "default";
-		pinctrl-0 = <&sdc1_clk &sdc1_cmd &sdc1_data>;
-
-		clocks = <&gcc GCC_SDCC1_APPS_CLK>, <&gcc GCC_SDCC1_AHB_CLK>;
-		clock-names = "core", "iface";
+	aliases {
+		sdhc1 = &sdhc_1;
 	};
 
-	sdhc_2: sdhci@f98a4900 {
-		compatible = "qcom,sdhci-msm-v4";
-		reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
-		interrupts = <0 125 0>;
-		bus-width = <4>;
-		cd-gpios = <&msmgpio 62 0x1>;
+	sdhc_1: qcom,sdhc@f9824900 {
+		compatible = "qcom,sdhci-msm";
+                reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
+                reg-names = "hc_mem", "core_mem";
+                interrupts = <0 123 0>, <0 138 0>;
+                interrupt-names = "hc_irq", "pwr_irq";
 
-		vmmc-supply = <&pm8941_l21>;
-		vqmmc-supply = <&pm8941_l13>;
+		vdd-supply = <&pm8941_l21>;
+		vdd-io-supply = <&pm8941_l13>;
+		qcom,vdd-voltage-level = <2950000 2950000>;
+		qcom,vdd-current-level = <9000 800000>;
 
-		pinctrl-names = "default";
-		pinctrl-0 = <&sdc2_clk &sdc2_cmd &sdc2_data>;
+		qcom,vdd-io-always-on;
+		qcom,vdd-io-lpm-sup;
+		qcom,vdd-io-voltage-level = <1800000 2950000>;
+		qcom,vdd-io-current-level = <6 22000>;
 
-		clocks = <&gcc GCC_SDCC2_APPS_CLK>, <&gcc GCC_SDCC2_AHB_CLK>;
-		clock-names = "core", "iface";
+		qcom,devfreq,freq-table = <52000000 200000000>;
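+		/* illustrative: one bus-aggr-clk rate per devfreq freq-table entry above */
+		qcom,bus-aggr-clk-rates = <50000000 100000000>;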
+
+		pinctrl-names = "active", "sleep";
+		pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+		pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_on &sdc1_data_on>;
+
+		qcom,bus-width = <4>;
+		qcom,nonremovable;
+		qcom,large-address-bus;
+		qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+
+		qcom,scaling-lower-bus-speed-mode = "DDR52";
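+		/* illustrative HS400 DLL setting; the actual value is platform specific */
+		qcom,ddr-config = <0xC3040873>;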
+
+		gpios = <&msmgpio 40 0>, /* CLK */
+			<&msmgpio 39 0>, /* CMD */
+			<&msmgpio 38 0>, /* DATA0 */
+			<&msmgpio 37 0>, /* DATA1 */
+			<&msmgpio 36 0>, /* DATA2 */
+			<&msmgpio 35 0>; /* DATA3 */
+		qcom,gpio-names = "CLK", "CMD", "DAT0", "DAT1", "DAT2", "DAT3";
+
+		qcom,pm-qos-irq-type = "affine_cores";
+		qcom,pm-qos-irq-cpu = <0>;
+		qcom,pm-qos-irq-latency = <500 100>;
+		qcom,pm-qos-cpu-groups = <0x03 0x0c>;
+		qcom,pm-qos-cmdq-latency-us = <50 100>, <50 100>;
+		qcom,pm-qos-legacy-latency-us = <50 100>, <50 100>;
+	};
+
+	sdhc_2: qcom,sdhc@f98a4900 {
+		qcom,pm-qos-irq-type = "affine_irq";
+		qcom,pm-qos-irq-latency = <120 200>;
 	};
diff --git a/Documentation/devicetree/bindings/nfc/nq-nci.txt b/Documentation/devicetree/bindings/nfc/nq-nci.txt
new file mode 100644
index 0000000..c349e83
--- /dev/null
+++ b/Documentation/devicetree/bindings/nfc/nq-nci.txt
@@ -0,0 +1,49 @@
+Qualcomm Technologies, Inc NQxxxx NFC NCI device
+
+Near Field Communication (NFC) device is based on NFC Controller Interface (NCI)
+
+Required properties:
+
+- compatible: "qcom,nq-nci"
+- reg: NCI i2c slave address.
+- qcom,nq-ven: gpio for hardware reset.
+- qcom,nq-irq: gpio for read interrupt.
+- qcom,nq-firm: gpio for firmware download.
+- qcom,nq-clkreq: gpio for clock
+- interrupt-parent: Should be phandle for the interrupt controller
+                    that services interrupts for this device.
+- interrupts: NFC read interrupt, gpio-clk-req interrupt
+
+
+Recommended properties:
+
+- interrupt-names: names of interrupts, should include "nfc_irq", used for reference
+
+
+Optional properties:
+
+- pinctrl-names, pinctrl-0, pinctrl-1: references to the pinctrl settings
+- clocks, clock-names: must contain the NQxxxx's core clock.
+- qcom,nq-esepwr: gpio to control power of secure element
+- qcom,clk-src: NFC clock for antenna
+
+Example:
+
+	nq-nci@2b {
+		compatible = "qcom,nq-nci";
+		reg = <0x2b>;
+		qcom,nq-irq = <&tlmm 29 0x00>;
+		qcom,nq-ven = <&tlmm 30 0x00>;
+		qcom,nq-firm = <&tlmm 93 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		qcom,clk-src = "BBCLK2";
+		interrupt-parent = <&tlmm>;
+		interrupts = <29 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active","nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_disable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_disable_suspend>;
+		clocks = <&clock_rpm clk_bb_clk2_pin>;
+		clock-names = "ref_clk";
+	};
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index 4ad0bb1..05ee384 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -8,6 +8,40 @@
 
 	force_ro		Enforce read-only access even if write protect switch is off.
 
+	num_wr_reqs_to_start_packing 	This attribute is used to determine
+	the trigger for activating the write packing, in case the write
+	packing control feature is enabled.
+
+	When the MMC manages to reach a point where num_wr_reqs_to_start_packing
+	write requests could be packed, it enables the write packing feature.
+	This allows us to start the write packing only when it is beneficial
+	and has minimal effect on the read latency.
+
+	The number of potential packed requests that will trigger the packing
+	can be configured via sysfs by writing the required value to:
+	/sys/block/<block_dev_name>/num_wr_reqs_to_start_packing.
+
+	The default value of num_wr_reqs_to_start_packing was determined by
+	running parallel lmdd write and lmdd read operations and calculating
+	the max number of packed write requests.
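+
+	For example (device name and value are illustrative):
+	echo 17 > /sys/block/mmcblk0/num_wr_reqs_to_start_packing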
+
 SD and MMC Device Attributes
 ============================
 
@@ -75,3 +109,51 @@
 	"raw_rpmb_size_mult" is a multiple of 128kB block.
 	RPMB size in byte is calculated by using the following equation:
 	RPMB partition size = 128kB x raw_rpmb_size_mult
+
+SD/MMC/SDIO Clock Gating Attribute
+==================================
+
+Read and write access is provided to following attribute.
+This attribute appears only if CONFIG_MMC_CLKGATE is enabled.
+
+	clkgate_delay	Tune the clock gating delay with desired value in milliseconds.
+
+echo <desired delay> > /sys/class/mmc_host/mmcX/clkgate_delay
+
+SD/MMC/SDIO Clock Scaling Attributes
+====================================
+
+Read and write accesses are provided to following attributes.
+
+	polling_interval	Measured in milliseconds, this attribute
+				defines how often we need to check the card
+				usage and make decisions on frequency scaling.
+
+	up_threshold		This attribute defines what should be the
+				average card usage between the polling
+				interval for the mmc core to make a decision
+				on whether it should increase the frequency.
+				For example when it is set to '35' it means
+				that between the checking intervals the card
+				needs to be on average more than 35% in use to
+				scale up the frequency. The value should be
+				between 0 - 100 so that it can be compared
+				against load percentage.
+
+	down_threshold		Similar to up_threshold, but on lowering the
+				frequency. For example, when it is set to '2'
+				it means that between the checking intervals
+				the card needs to be on average less than 2%
+				in use to scale down the clocks to minimum
+				frequency. The value should be between 0 - 100
+				so that it can be compared against load
+				percentage.
+
+	enable			Enable clock scaling for hosts (and cards)
+				that support ultrahigh speed modes
+				(SDR104, DDR50, HS200).
+
+echo <desired value> > /sys/class/mmc_host/mmcX/clk_scaling/polling_interval
+echo <desired value> > /sys/class/mmc_host/mmcX/clk_scaling/up_threshold
+echo <desired value> > /sys/class/mmc_host/mmcX/clk_scaling/down_threshold
+echo <desired value> > /sys/class/mmc_host/mmcX/clk_scaling/enable
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
index 05b9be8..1fa6cc0 100644
--- a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
@@ -5,6 +5,7 @@
 
 
 #include "kona-pmic-overlay.dtsi"
+#include "kona-sde-display.dtsi"
 #include "kona-camera-sensor-cdp.dtsi"
 #include "kona-audio-overlay.dtsi"
 
@@ -59,3 +60,187 @@
 		qca,bt-vdd-rfa2-voltage-level = <1350000 1350000>;
 	};
 };
+
+&pm8150b_vadc {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	vph_pwr@83 {
+		reg = <ADC_VPH_PWR>;
+		label = "vph_pwr";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	wp_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		label = "wp_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	conn_therm@4f {
+		reg = <ADC_AMUX_THM3_PU2>;
+		label = "conn_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	chg_sbux@99 {
+		reg = <ADC_SBUx>;
+		label = "chg_sbux";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	mid_chg_div6@1e {
+		reg = <ADC_MID_CHG_DIV6>;
+		label = "chg_mid";
+		qcom,pre-scaling = <1 6>;
+	};
+
+	usb_in_i_uv@7 {
+		reg = <ADC_USB_IN_I>;
+		label = "usb_in_i_uv";
+		qcom,pre-scaling = <1 1>;
+	};
+
+	usb_in_v_div_16@8 {
+		reg = <ADC_USB_IN_V_16>;
+		label = "usb_in_v_div_16";
+		qcom,pre-scaling = <1 16>;
+	};
+};
+
+&pm8150_vadc {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	vph_pwr@83 {
+		reg = <ADC_VPH_PWR>;
+		label = "vph_pwr";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	vcoin@85 {
+		reg = <ADC_VCOIN>;
+		label = "vcoin";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	xo_therm@4c {
+		reg = <ADC_XO_THERM_PU2>;
+		label = "xo_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	skin_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		label = "skin_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	pa_therm1@4e {
+		reg = <ADC_AMUX_THM2_PU2>;
+		label = "pa_therm1";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+};
+
+&pm8150l_vadc {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	vph_pwr@83 {
+		reg = <ADC_VPH_PWR>;
+		label = "vph_pwr";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	camera_flash_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		label = "camera_flash_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	skin_msm_therm@4e {
+		reg = <ADC_AMUX_THM2_PU2>;
+		label = "skin_msm_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	pa_therm2@4f {
+		reg = <ADC_AMUX_THM3_PU2>;
+		label = "pa_therm2";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+};
+
+&pm8150b_adc_tm {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	wp_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+};
+
+&pm8150_adc_tm {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	xo_therm@4c {
+		reg = <ADC_XO_THERM_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+
+	skin_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+
+	pa_therm1@4e {
+		reg = <ADC_AMUX_THM2_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+};
+
+&pm8150l_adc_tm {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	camera_flash_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+
+	skin_msm_therm@4e {
+		reg = <ADC_AMUX_THM2_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+
+	pa_therm2@4f {
+		reg = <ADC_AMUX_THM3_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
index a26983c..1349b63 100644
--- a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
@@ -5,6 +5,7 @@
 
 
 #include "kona-pmic-overlay.dtsi"
+#include "kona-sde-display.dtsi"
 #include "kona-camera-sensor-mtp.dtsi"
 #include "kona-audio-overlay.dtsi"
 
@@ -59,3 +60,187 @@
 		qca,bt-vdd-rfa2-voltage-level = <1350000 1350000>;
 	};
 };
+
+&pm8150b_vadc {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	vph_pwr@83 {
+		reg = <ADC_VPH_PWR>;
+		label = "vph_pwr";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	wp_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		label = "wp_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	conn_therm@4f {
+		reg = <ADC_AMUX_THM3_PU2>;
+		label = "conn_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	chg_sbux@99 {
+		reg = <ADC_SBUx>;
+		label = "chg_sbux";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	mid_chg_div6@1e {
+		reg = <ADC_MID_CHG_DIV6>;
+		label = "chg_mid";
+		qcom,pre-scaling = <1 6>;
+	};
+
+	usb_in_i_uv@7 {
+		reg = <ADC_USB_IN_I>;
+		label = "usb_in_i_uv";
+		qcom,pre-scaling = <1 1>;
+	};
+
+	usb_in_v_div_16@8 {
+		reg = <ADC_USB_IN_V_16>;
+		label = "usb_in_v_div_16";
+		qcom,pre-scaling = <1 16>;
+	};
+};
+
+&pm8150_vadc {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	vph_pwr@83 {
+		reg = <ADC_VPH_PWR>;
+		label = "vph_pwr";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	vcoin@85 {
+		reg = <ADC_VCOIN>;
+		label = "vcoin";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	xo_therm@4c {
+		reg = <ADC_XO_THERM_PU2>;
+		label = "xo_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	skin_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		label = "skin_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	pa_therm1@4e {
+		reg = <ADC_AMUX_THM2_PU2>;
+		label = "pa_therm1";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+};
+
+&pm8150l_vadc {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	vph_pwr@83 {
+		reg = <ADC_VPH_PWR>;
+		label = "vph_pwr";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	camera_flash_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		label = "camera_flash_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	skin_msm_therm@4e {
+		reg = <ADC_AMUX_THM2_PU2>;
+		label = "skin_msm_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	pa_therm2@4f {
+		reg = <ADC_AMUX_THM3_PU2>;
+		label = "pa_therm2";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+};
+
+&pm8150b_adc_tm {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	wp_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+};
+
+&pm8150_adc_tm {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	xo_therm@4c {
+		reg = <ADC_XO_THERM_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+
+	skin_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+
+	pa_therm1@4e {
+		reg = <ADC_AMUX_THM2_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+};
+
+&pm8150l_adc_tm {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	camera_flash_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+
+	skin_msm_therm@4e {
+		reg = <ADC_AMUX_THM2_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+
+	pa_therm2@4f {
+		reg = <ADC_AMUX_THM3_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
index 93afdadc..338e057 100644
--- a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
@@ -238,6 +238,190 @@
 			};
 		};
 
+		storage_cd: storage_cd {
+			mux {
+				pins = "gpio77";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio77";
+				bias-pull-up;           /* pull up */
+				drive-strength = <2>;   /* 2 MA */
+			};
+		};
+
+		sdc2_clk_on: sdc2_clk_on {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_clk_off: sdc2_clk_off {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_clk_ds_400KHz: sdc2_clk_ds_400KHz {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_clk_ds_50MHz: sdc2_clk_ds_50MHz {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_clk_ds_100MHz: sdc2_clk_ds_100MHz {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_clk_ds_200MHz: sdc2_clk_ds_200MHz {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_cmd_on: sdc2_cmd_on {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_cmd_off: sdc2_cmd_off {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_cmd_ds_400KHz: sdc2_cmd_ds_400KHz {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_cmd_ds_50MHz: sdc2_cmd_ds_50MHz {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_cmd_ds_100MHz: sdc2_cmd_ds_100MHz {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_cmd_ds_200MHz: sdc2_cmd_ds_200MHz {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_data_on: sdc2_data_on {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_data_off: sdc2_data_off {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_data_ds_400KHz: sdc2_data_ds_400KHz {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_data_ds_50MHz: sdc2_data_ds_50MHz {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_data_ds_100MHz: sdc2_data_ds_100MHz {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_data_ds_200MHz: sdc2_data_ds_200MHz {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		/* add pins for DisplayPort */
+		sde_dp_usbplug_cc_active: sde_dp_usbplug_cc_active {
+			mux {
+				pins = "gpio65";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio65";
+				bias-disable;
+				drive-strength = <16>;
+			};
+		};
+
+		sde_dp_usbplug_cc_suspend: sde_dp_usbplug_cc_suspend {
+			mux {
+				pins = "gpio65";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio65";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
 		ap2mdm {
 			ap2mdm_active: ap2mdm_active {
 				mux {
@@ -2699,6 +2883,262 @@
 			};
 		};
 
+		qupv3_se0_spi_pins: qupv3_se0_spi_pins {
+			qupv3_se0_spi_active: qupv3_se0_spi_active {
+				mux {
+					pins = "gpio28", "gpio29", "gpio30",
+								"gpio31";
+					function = "qup0";
+				};
+
+				config {
+					pins = "gpio28", "gpio29", "gpio30",
+								"gpio31";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se0_spi_sleep: qupv3_se0_spi_sleep {
+				mux {
+					pins = "gpio28", "gpio29", "gpio30",
+								"gpio31";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio28", "gpio29", "gpio30",
+								"gpio31";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se1_spi_pins: qupv3_se1_spi_pins {
+			qupv3_se1_spi_active: qupv3_se1_spi_active {
+				mux {
+					pins = "gpio4", "gpio5", "gpio6",
+								"gpio7";
+					function = "qup1";
+				};
+
+				config {
+					pins = "gpio4", "gpio5", "gpio6",
+								"gpio7";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se1_spi_sleep: qupv3_se1_spi_sleep {
+				mux {
+					pins = "gpio4", "gpio5", "gpio6",
+								"gpio7";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio4", "gpio5", "gpio6",
+								"gpio7";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se2_spi_pins: qupv3_se2_spi_pins {
+			qupv3_se2_spi_active: qupv3_se2_spi_active {
+				mux {
+					pins = "gpio115", "gpio116", "gpio117",
+								"gpio118";
+					function = "qup2";
+				};
+
+				config {
+					pins = "gpio115", "gpio116", "gpio117",
+								"gpio118";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se2_spi_sleep: qupv3_se2_spi_sleep {
+				mux {
+					pins = "gpio115", "gpio116", "gpio117",
+								"gpio118";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio115", "gpio116", "gpio117",
+								"gpio118";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se3_spi_pins: qupv3_se3_spi_pins {
+			qupv3_se3_spi_active: qupv3_se3_spi_active {
+				mux {
+					pins = "gpio119", "gpio120", "gpio121",
+								"gpio122";
+					function = "qup3";
+				};
+
+				config {
+					pins = "gpio119", "gpio120", "gpio121",
+								"gpio122";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se3_spi_sleep: qupv3_se3_spi_sleep {
+				mux {
+					pins = "gpio119", "gpio120", "gpio121",
+							"gpio122";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio119", "gpio120", "gpio121",
+							"gpio122";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se4_spi_pins: qupv3_se4_spi_pins {
+			qupv3_se4_spi_active: qupv3_se4_spi_active {
+				mux {
+					pins = "gpio8", "gpio9", "gpio10",
+								"gpio11";
+					function = "qup4";
+				};
+
+				config {
+					pins = "gpio8", "gpio9", "gpio10",
+								"gpio11";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se4_spi_sleep: qupv3_se4_spi_sleep {
+				mux {
+					pins = "gpio8", "gpio9", "gpio10",
+								"gpio11";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio8", "gpio9", "gpio10",
+								"gpio11";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se5_spi_pins: qupv3_se5_spi_pins {
+			qupv3_se5_spi_active: qupv3_se5_spi_active {
+				mux {
+					pins = "gpio12", "gpio13", "gpio14",
+								"gpio15";
+					function = "qup5";
+				};
+
+				config {
+					pins = "gpio12", "gpio13", "gpio14",
+								"gpio15";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se5_spi_sleep: qupv3_se5_spi_sleep {
+				mux {
+					pins = "gpio12", "gpio13", "gpio14",
+								"gpio15";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio12", "13", "gpio14",
+								"gpio15";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se6_spi_pins: qupv3_se6_spi_pins {
+			qupv3_se6_spi_active: qupv3_se6_spi_active {
+				mux {
+					pins = "gpio16", "gpio17", "gpio18",
+								"gpio19";
+					function = "qup6";
+				};
+
+				config {
+					pins = "gpio16", "gpio17", "gpio18",
+								"gpio19";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se6_spi_sleep: qupv3_se6_spi_sleep {
+				mux {
+					pins = "gpio16", "gpio17", "gpio18",
+								"gpio19";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio16", "gpio17", "gpio18",
+								"gpio19";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se7_spi_pins: qupv3_se7_spi_pins {
+			qupv3_se7_spi_active: qupv3_se7_spi_active {
+				mux {
+					pins = "gpio20", "gpio21", "gpio22",
+								"gpio23";
+					function = "qup7";
+				};
+
+				config {
+					pins = "gpio20", "gpio21", "gpio22",
+								"gpio23";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se7_spi_sleep: qupv3_se7_spi_sleep {
+				mux {
+					pins = "gpio20", "gpio21", "gpio22",
+								"gpio23";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio20", "gpio21", "gpio22",
+								"gpio23";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
 		/* QUPv3_1 South_1 SE mappings */
 		/* SE 8 pin mappings */
 		qupv3_se8_i2c_pins: qupv3_se8_i2c_pins {
@@ -2874,6 +3314,198 @@
 			};
 		};
 
+		qupv3_se8_spi_pins: qupv3_se8_spi_pins {
+			qupv3_se8_spi_active: qupv3_se8_spi_active {
+				mux {
+					pins = "gpio24", "gpio25", "gpio26",
+								"gpio27";
+					function = "qup8";
+				};
+
+				config {
+					pins = "gpio24", "gpio25", "gpio26",
+								"gpio27";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se8_spi_sleep: qupv3_se8_spi_sleep {
+				mux {
+					pins = "gpio24", "gpio25", "gpio26",
+								"gpio27";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio24", "gpio25", "gpio26",
+								"gpio27";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se9_spi_pins: qupv3_se9_spi_pins {
+			qupv3_se9_spi_active: qupv3_se9_spi_active {
+				mux {
+					pins = "gpio125", "gpio126", "gpio127",
+								"gpio128";
+					function = "qup9";
+				};
+
+				config {
+					pins = "gpio125", "gpio126", "gpio127",
+								"gpio128";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se9_spi_sleep: qupv3_se9_spi_sleep {
+				mux {
+					pins = "gpio125", "gpio126", "gpio127",
+								"gpio128";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio125", "gpio126", "gpio127",
+								"gpio128";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se10_spi_pins: qupv3_se10_spi_pins {
+			qupv3_se10_spi_active: qupv3_se10_spi_active {
+				mux {
+					pins = "gpio129", "gpio130", "gpio131",
+								"gpio132";
+					function = "qup10";
+				};
+
+				config {
+					pins = "gpio129", "gpio130", "gpio131",
+								"gpio132";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se10_spi_sleep: qupv3_se10_spi_sleep {
+				mux {
+					pins = "gpio129", "gpio130", "gpio131",
+								"gpio132";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio129", "gpio130", "gpio131",
+								"gpio132";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se11_spi_pins: qupv3_se11_spi_pins {
+			qupv3_se11_spi_active: qupv3_se11_spi_active {
+				mux {
+					pins = "gpio60", "gpio61", "gpio62",
+								"gpio63";
+					function = "qup11";
+				};
+
+				config {
+					pins = "gpio60", "gpio61", "gpio62",
+								"gpio63";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se11_spi_sleep: qupv3_se11_spi_sleep {
+				mux {
+					pins = "gpio60", "gpio61", "gpio62",
+								"gpio63";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio60", "gpio61", "gpio62",
+								"gpio63";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se12_spi_pins: qupv3_se12_spi_pins {
+			qupv3_se12_spi_active: qupv3_se12_spi_active {
+				mux {
+					pins = "gpio32", "gpio33", "gpio34",
+								"gpio35";
+					function = "qup12";
+				};
+
+				config {
+					pins = "gpio32", "gpio33", "gpio34",
+								"gpio35";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se12_spi_sleep: qupv3_se12_spi_sleep {
+				mux {
+					pins = "gpio32", "gpio33", "gpio34",
+								"gpio35";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio32", "gpio33", "gpio34",
+								"gpio35";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se13_spi_pins: qupv3_se13_spi_pins {
+			qupv3_se13_spi_active: qupv3_se13_spi_active {
+				mux {
+					pins = "gpio36", "gpio37", "gpio38",
+								"gpio39";
+					function = "qup13";
+				};
+
+				config {
+					pins = "gpio36", "gpio37", "gpio38",
+								"gpio39";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se13_spi_sleep: qupv3_se13_spi_sleep {
+				mux {
+					pins = "gpio36", "gpio37", "gpio38",
+								"gpio39";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio36", "gpio37", "gpio38",
+								"gpio39";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
 		/* QUPv3_2 South_2 SE mappings */
 		/* SE 14 pin mappings */
 		qupv3_se14_i2c_pins: qupv3_se14_i2c_pins {
@@ -3048,5 +3680,197 @@
 				};
 			};
 		};
+
+		qupv3_se14_spi_pins: qupv3_se14_spi_pins {
+			qupv3_se14_spi_active: qupv3_se14_spi_active {
+				mux {
+					pins = "gpio40", "gpio41", "gpio42",
+								"gpio43";
+					function = "qup14";
+				};
+
+				config {
+					pins = "gpio40", "gpio41", "gpio42",
+								"gpio43";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se14_spi_sleep: qupv3_se14_spi_sleep {
+				mux {
+					pins = "gpio40", "gpio41", "gpio42",
+								"gpio43";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio40", "gpio41", "gpio42",
+								"gpio43";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se15_spi_pins: qupv3_se15_spi_pins {
+			qupv3_se15_spi_active: qupv3_se15_spi_active {
+				mux {
+					pins = "gpio44", "gpio45", "gpio46",
+								"gpio47";
+					function = "qup15";
+				};
+
+				config {
+					pins = "gpio44", "gpio45", "gpio46",
+								"gpio47";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se15_spi_sleep: qupv3_se15_spi_sleep {
+				mux {
+					pins = "gpio44", "gpio45", "gpio46",
+								"gpio47";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio44", "gpio45", "gpio46",
+								"gpio47";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se16_spi_pins: qupv3_se16_spi_pins {
+			qupv3_se16_spi_active: qupv3_se16_spi_active {
+				mux {
+					pins = "gpio48", "gpio49", "gpio50",
+								"gpio51";
+					function = "qup16";
+				};
+
+				config {
+					pins = "gpio48", "gpio49", "gpio50",
+								"gpio51";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se16_spi_sleep: qupv3_se16_spi_sleep {
+				mux {
+					pins = "gpio48", "gpio49", "gpio50",
+								"gpio51";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio48", "gpio49", "gpio50",
+								"gpio51";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se17_spi_pins: qupv3_se17_spi_pins {
+			qupv3_se17_spi_active: qupv3_se17_spi_active {
+				mux {
+					pins = "gpio52", "gpio53", "gpio54",
+								"gpio55";
+					function = "qup17";
+				};
+
+				config {
+					pins = "gpio52", "gpio53", "gpio54",
+								"gpio55";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se17_spi_sleep: qupv3_se17_spi_sleep {
+				mux {
+					pins = "gpio52", "gpio53", "gpio54",
+								"gpio55";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio52", "gpio53", "gpio54",
+								"gpio55";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se18_spi_pins: qupv3_se18_spi_pins {
+			qupv3_se18_spi_active: qupv3_se18_spi_active {
+				mux {
+					pins = "gpio56", "gpio57", "gpio58",
+								"gpio59";
+					function = "qup18";
+				};
+
+				config {
+					pins = "gpio56", "gpio57", "gpio58",
+								"gpio59";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se18_spi_sleep: qupv3_se18_spi_sleep {
+				mux {
+					pins = "gpio56", "gpio57", "gpio58",
+								"gpio59";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio56", "gpio57", "gpio58",
+								"gpio59";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
+
+		qupv3_se19_spi_pins: qupv3_se19_spi_pins {
+			qupv3_se19_spi_active: qupv3_se19_spi_active {
+				mux {
+					pins = "gpio0", "gpio1", "gpio2",
+								"gpio3";
+					function = "qup19";
+				};
+
+				config {
+					pins = "gpio0", "gpio1", "gpio2",
+								"gpio3";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+
+			qupv3_se19_spi_sleep: qupv3_se19_spi_sleep {
+				mux {
+					pins = "gpio0", "gpio1", "gpio2",
+								"gpio3";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio0", "gpio1", "gpio2",
+								"gpio3";
+					drive-strength = <6>;
+					bias-disable;
+				};
+			};
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-qrd.dtsi b/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
index a035281..c7ab067 100644
--- a/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
@@ -4,6 +4,7 @@
  */
 
 #include "kona-pmic-overlay.dtsi"
+#include "kona-sde-display.dtsi"
 #include "kona-audio-overlay.dtsi"
 
 &qupv3_se12_2uart {
@@ -80,3 +81,187 @@
 
 	status = "ok";
 };
+
+&pm8150b_vadc {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	vph_pwr@83 {
+		reg = <ADC_VPH_PWR>;
+		label = "vph_pwr";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	wp_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		label = "wp_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	conn_therm@4f {
+		reg = <ADC_AMUX_THM3_PU2>;
+		label = "conn_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	chg_sbux@99 {
+		reg = <ADC_SBUx>;
+		label = "chg_sbux";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	mid_chg_div6@1e {
+		reg = <ADC_MID_CHG_DIV6>;
+		label = "chg_mid";
+		qcom,pre-scaling = <1 6>;
+	};
+
+	usb_in_i_uv@7 {
+		reg = <ADC_USB_IN_I>;
+		label = "usb_in_i_uv";
+		qcom,pre-scaling = <1 1>;
+	};
+
+	usb_in_v_div_16@8 {
+		reg = <ADC_USB_IN_V_16>;
+		label = "usb_in_v_div_16";
+		qcom,pre-scaling = <1 16>;
+	};
+};
+
+&pm8150_vadc {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	vph_pwr@83 {
+		reg = <ADC_VPH_PWR>;
+		label = "vph_pwr";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	vcoin@85 {
+		reg = <ADC_VCOIN>;
+		label = "vcoin";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	xo_therm@4c {
+		reg = <ADC_XO_THERM_PU2>;
+		label = "xo_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	skin_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		label = "skin_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	pa_therm1@4e {
+		reg = <ADC_AMUX_THM2_PU2>;
+		label = "pa_therm1";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+};
+
+&pm8150l_vadc {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	vph_pwr@83 {
+		reg = <ADC_VPH_PWR>;
+		label = "vph_pwr";
+		qcom,pre-scaling = <1 3>;
+	};
+
+	camera_flash_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		label = "camera_flash_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	skin_msm_therm@4e {
+		reg = <ADC_AMUX_THM2_PU2>;
+		label = "skin_msm_therm";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+
+	pa_therm2@4f {
+		reg = <ADC_AMUX_THM3_PU2>;
+		label = "pa_therm2";
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+		qcom,pre-scaling = <1 1>;
+	};
+};
+
+&pm8150b_adc_tm {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	wp_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+};
+
+&pm8150_adc_tm {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	xo_therm@4c {
+		reg = <ADC_XO_THERM_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+
+	skin_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+
+	pa_therm1@4e {
+		reg = <ADC_AMUX_THM2_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+};
+
+&pm8150l_adc_tm {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	camera_flash_therm@4d {
+		reg = <ADC_AMUX_THM1_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+
+	skin_msm_therm@4e {
+		reg = <ADC_AMUX_THM2_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+
+	pa_therm2@4f {
+		reg = <ADC_AMUX_THM3_PU2>;
+		qcom,ratiometric;
+		qcom,hw-settle-time = <200>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi b/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi
index dde4d12..6b25d5c 100644
--- a/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi
@@ -4,7 +4,6 @@
  */
 
 #include <dt-bindings/msm/msm-bus-ids.h>
-#include <dt-bindings/interrupt-controller/irq.h>
 
 &soc {
 	/* QUPv3_0  wrapper  instance : North QUP*/
@@ -225,6 +224,183 @@
 		status = "disabled";
 	};
 
+	/* SPI */
+	qupv3_se0_spi: spi@980000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x980000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se0_spi_active>;
+		pinctrl-1 = <&qupv3_se0_spi_sleep>;
+		interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 0 1 64 0>,
+			<&gpi_dma0 1 0 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se1_spi: spi@984000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x984000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se1_spi_active>;
+		pinctrl-1 = <&qupv3_se1_spi_sleep>;
+		interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 1 1 64 0>,
+			<&gpi_dma0 1 1 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se2_spi: spi@988000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x988000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se2_spi_active>;
+		pinctrl-1 = <&qupv3_se2_spi_sleep>;
+		interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 2 1 64 0>,
+			<&gpi_dma0 1 2 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se3_spi: spi@98c000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x98c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se3_spi_active>;
+		pinctrl-1 = <&qupv3_se3_spi_sleep>;
+		interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 3 1 64 0>,
+			<&gpi_dma0 1 3 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se4_spi: spi@990000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x990000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se4_spi_active>;
+		pinctrl-1 = <&qupv3_se4_spi_sleep>;
+		interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 4 1 64 0>,
+			<&gpi_dma0 1 4 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se5_spi: spi@994000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x994000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se5_spi_active>;
+		pinctrl-1 = <&qupv3_se5_spi_sleep>;
+		interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 5 1 64 0>,
+			<&gpi_dma0 1 5 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se6_spi: spi@998000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x998000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se6_spi_active>;
+		pinctrl-1 = <&qupv3_se6_spi_sleep>;
+		interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 6 1 64 0>,
+			<&gpi_dma0 1 6 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se7_spi: spi@99c000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x99c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se7_spi_active>;
+		pinctrl-1 = <&qupv3_se7_spi_sleep>;
+		interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 7 1 64 0>,
+			<&gpi_dma0 1 7 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
 	/* QUPv3 South_1 & South_2 Instances
 	 * South_1 0 : SE 8
 	 * South_1 1 : SE 9
@@ -394,6 +570,139 @@
 		status = "disabled";
 	};
 
+	/* SPI */
+	qupv3_se8_spi: spi@a80000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa80000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se8_spi_active>;
+		pinctrl-1 = <&qupv3_se8_spi_sleep>;
+		interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 0 1 64 0>,
+			<&gpi_dma1 1 0 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se9_spi: spi@a84000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa84000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se9_spi_active>;
+		pinctrl-1 = <&qupv3_se9_spi_sleep>;
+		interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 1 1 64 0>,
+			<&gpi_dma1 1 1 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se10_spi: spi@a88000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa88000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se10_spi_active>;
+		pinctrl-1 = <&qupv3_se10_spi_sleep>;
+		interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 2 1 64 0>,
+			<&gpi_dma1 1 2 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se11_spi: spi@a8c000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa8c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S3_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se11_spi_active>;
+		pinctrl-1 = <&qupv3_se11_spi_sleep>;
+		interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 3 1 64 0>,
+			<&gpi_dma1 1 3 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se12_spi: spi@a90000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa90000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se12_spi_active>;
+		pinctrl-1 = <&qupv3_se12_spi_sleep>;
+		interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 4 1 64 0>,
+			<&gpi_dma1 1 4 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se13_spi: spi@a94000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa94000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se13_spi_active>;
+		pinctrl-1 = <&qupv3_se13_spi_sleep>;
+		interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 5 1 64 0>,
+			<&gpi_dma1 1 5 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
 	/* QUPv3_2  wrapper  instance : South_2 QUP */
 	qupv3_2: qcom,qupv3_2_geni_se@8c0000 {
 		compatible = "qcom,qupv3-geni-se";
@@ -574,4 +883,137 @@
 		status = "disabled";
 	};
 
+	/* SPI */
+	qupv3_se14_spi: spi@880000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x880000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP2_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se14_spi_active>;
+		pinctrl-1 = <&qupv3_se14_spi_sleep>;
+		interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_2>;
+		dmas = <&gpi_dma2 0 0 1 64 0>,
+			<&gpi_dma2 1 0 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se15_spi: spi@884000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x884000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP2_S1_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se15_spi_active>;
+		pinctrl-1 = <&qupv3_se15_spi_sleep>;
+		interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_2>;
+		dmas = <&gpi_dma2 0 1 1 64 0>,
+			<&gpi_dma2 1 1 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se16_spi: spi@888000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x888000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP2_S2_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se16_spi_active>;
+		pinctrl-1 = <&qupv3_se16_spi_sleep>;
+		interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_2>;
+		dmas = <&gpi_dma2 0 2 1 64 0>,
+			<&gpi_dma2 1 2 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se17_spi: spi@88c000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x88c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP2_S3_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se17_spi_active>;
+		pinctrl-1 = <&qupv3_se17_spi_sleep>;
+		interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_2>;
+		dmas = <&gpi_dma2 0 3 1 64 0>,
+			<&gpi_dma2 1 3 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se18_spi: spi@890000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x890000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP2_S4_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se18_spi_active>;
+		pinctrl-1 = <&qupv3_se18_spi_sleep>;
+		interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_2>;
+		dmas = <&gpi_dma2 0 4 1 64 0>,
+			<&gpi_dma2 1 4 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
+	qupv3_se19_spi: spi@894000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x894000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP2_S5_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se19_spi_active>;
+		pinctrl-1 = <&qupv3_se19_spi_sleep>;
+		interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
+		spi-max-frequency = <50000000>;
+		qcom,wrapper-core = <&qupv3_2>;
+		dmas = <&gpi_dma2 0 5 1 64 0>,
+			<&gpi_dma2 1 5 1 64 0>;
+		dma-names = "tx", "rx";
+		status = "disabled";
+	};
+
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-rumi.dtsi b/arch/arm64/boot/dts/qcom/kona-rumi.dtsi
index e93c034..5b887ab 100644
--- a/arch/arm64/boot/dts/qcom/kona-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-rumi.dtsi
@@ -2,8 +2,9 @@
 /*
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
-
+#include <dt-bindings/gpio/gpio.h>
 #include "kona-pmic-overlay.dtsi"
+#include "kona-sde-display.dtsi"
 #include "msm-audio-lpass.dtsi"
 
 &arch_timer {
@@ -134,3 +135,21 @@
 		};
 	};
 };
+
+&sdhc_2 {
+	vdd-supply = <&pm8150a_l9>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8150a_l6>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
+
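+	/* SD card detect on TLMM GPIO 77, active low */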
+	cd-gpios = <&tlmm 77 GPIO_ACTIVE_LOW>;
+
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi b/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi
index 421a497..dbf5a99 100644
--- a/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi
@@ -1,9 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 &soc {
+	ext_disp: qcom,msm-ext-disp {
+		compatible = "qcom,msm-ext-disp";
+
+		ext_disp_audio_codec: qcom,msm-ext-disp-audio-codec-rx {
+			compatible = "qcom,msm-ext-disp-audio-codec-rx";
+		};
+	};
+
 	sde_wb: qcom,wb-display@0 {
 		compatible = "qcom,wb-display";
 		cell-index = <0>;
@@ -11,6 +19,18 @@
 	};
 };
 
+&sde_dp {
+	qcom,dp-usbpd-detection = <&pm8150b_pdphy>;
+	qcom,ext-disp = <&ext_disp>;
+	qcom,dp-aux-switch = <&fsa4480>;
+
+	qcom,usbplug-cc-gpio = <&tlmm 65 0>;
+
+	pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+	pinctrl-0 = <&sde_dp_usbplug_cc_active>;
+	pinctrl-1 = <&sde_dp_usbplug_cc_suspend>;
+};
+
 &mdss_mdp {
-	connectors = <&sde_wb>;
+	connectors = <&sde_dp &sde_wb>;
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi
index 88c0649..6ee5fd6 100644
--- a/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 &soc {
@@ -71,5 +71,13 @@
 		      <0xaf03000 0x8>;
 		reg-names = "pll_base", "phy_base", "ln_tx0_base",
 			"ln_tx1_base", "gdsc_base";
+
+		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+			<&clock_rpmh RPMH_CXO_CLK>,
+			<&clock_gcc GCC_DISP_AHB_CLK>,
+			<&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+		clock-names = "iface_clk", "ref_clk_src",
+			"gcc_iface", "pipe_clk";
+		clock-rate = <0>;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-sde.dtsi b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
index 93cadb9..1928218 100644
--- a/arch/arm64/boot/dts/qcom/kona-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
@@ -2,6 +2,8 @@
 /*
  * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
+#include <dt-bindings/clock/mdss-7nm-pll-clk.h>
+
 &soc {
 	mdss_mdp: qcom,mdss_mdp@ae00000 {
 		compatible = "qcom,sde-kms";
@@ -35,11 +37,6 @@
 		interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-controller;
 		#interrupt-cells = <1>;
-		iommus = <&apps_smmu 0x820 0x402>;
-		qcom,iommu-dma = "disabled";
-
-		#address-cells = <1>;
-		#size-cells = <0>;
 
 		#power-domain-cells = <0>;
 
@@ -235,10 +232,18 @@
 			};
 		};
 
+		smmu_sde_unsec: qcom,smmu_sde_unsec_cb {
+			compatible = "qcom,smmu_sde_unsec";
+			iommus = <&apps_smmu 0x820 0x402>;
+			qcom,iommu-dma-addr-pool = <0x00020000 0xfffe0000>;
+			qcom,iommu-earlymap; /* for cont-splash */
+		};
+
 		smmu_sde_sec: qcom,smmu_sde_sec_cb {
 			compatible = "qcom,smmu_sde_sec";
 			iommus = <&apps_smmu 0x821 0x400>;
-			qcom,iommu-dma = "disabled";
+			qcom,iommu-dma-addr-pool = <0x00020000 0xfffe0000>;
+			qcom,iommu-vmid = <0xa>;
 		};
 
 		/* data and reg bus scale settings */
@@ -264,6 +269,112 @@
 		};
 	};
 
+	sde_dp: qcom,dp_display@ae90000 {
+		cell-index = <0>;
+		compatible = "qcom,dp-display";
+
+		vdda-1p2-supply = <&pm8150_l9>;
+		vdda-0p9-supply = <&pm8150_l18>;
+
+		reg =   <0xae90000 0x0dc>,
+			<0xae90200 0x0c0>,
+			<0xae90400 0x508>,
+			<0xae91000 0x094>,
+			<0x88eaa00 0x200>,
+			<0x88ea200 0x200>,
+			<0x88ea600 0x200>,
+			<0xaf02000 0x1a0>,
+			<0x780000 0x621c>,
+			<0x88ea040 0x10>,
+			<0x88e8000 0x20>,
+			<0x0aee1000 0x034>,
+			<0xae91400 0x094>;
+		/* dp_ctrl: dp_ahb, dp_aux, dp_link, dp_p0 */
+		reg-names = "dp_ahb", "dp_aux", "dp_link",
+			"dp_p0", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
+			"dp_mmss_cc", "qfprom_physical", "dp_pll",
+			"usb3_dp_com", "hdcp_physical", "dp_p1";
+
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <12 0>;
+
+		clocks =  <&clock_dispcc DISP_CC_MDSS_DP_AUX_CLK>,
+			<&clock_rpmh RPMH_CXO_CLK>,
+			<&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
+			<&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>,
+			<&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK_SRC>,
+			<&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>,
+			<&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK>;
+		clock-names = "core_aux_clk", "core_usb_ref_clk_src",
+			"core_usb_pipe_clk", "link_clk", "link_iface_clk",
+			"crypto_clk", "pixel_clk_rcg", "pixel_parent",
+			"pixel1_clk_rcg", "pixel1_parent",
+			"strm0_pixel_clk", "strm1_pixel_clk";
+
+		qcom,phy-version = <0x420>;
+		qcom,aux-cfg0-settings = [20 00];
+		qcom,aux-cfg1-settings = [24 13];
+		qcom,aux-cfg2-settings = [28 A4];
+		qcom,aux-cfg3-settings = [2c 00];
+		qcom,aux-cfg4-settings = [30 0a];
+		qcom,aux-cfg5-settings = [34 26];
+		qcom,aux-cfg6-settings = [38 0a];
+		qcom,aux-cfg7-settings = [3c 03];
+		qcom,aux-cfg8-settings = [40 b7];
+		qcom,aux-cfg9-settings = [44 03];
+
+		qcom,max-pclk-frequency-khz = <675000>;
+
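+		/* advertise DisplayPort Multi-Stream Transport (MST) support */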
+		qcom,mst-enable;
+
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1200000>;
+				qcom,supply-max-voltage = <1200000>;
+				qcom,supply-enable-load = <21800>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-0p9";
+				qcom,supply-min-voltage = <880000>;
+				qcom,supply-max-voltage = <880000>;
+				qcom,supply-enable-load = <36000>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+
+		qcom,core-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,core-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "refgen";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+	};
+
 	sde_rscc: qcom,sde_rscc@af20000 {
 		cell-index = <0>;
 		compatible = "qcom,sde-rsc";
diff --git a/arch/arm64/boot/dts/qcom/kona-thermal.dtsi b/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
index b6f8904..af60af1 100644
--- a/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-thermal.dtsi
@@ -1,9 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <dt-bindings/thermal/thermal.h>
+#include "kona-pmic-overlay.dtsi"
 
 &thermal_zones {
 	aoss0-usr {
@@ -355,4 +356,102 @@
 			};
 		};
 	};
+
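+	/* userspace-governed PMIC thermistor zones; temperature and hysteresis values below are in millidegrees Celsius */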
+	wp-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150b_adc_tm ADC_AMUX_THM1_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	xo-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150_adc_tm ADC_XO_THERM_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	skin-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150_adc_tm ADC_AMUX_THM1_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	pa-therm1-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150_adc_tm ADC_AMUX_THM2_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	camera-flash-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM1_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	skin-msm-therm-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM2_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
+
+	pa-therm2-usr {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "user_space";
+		thermal-sensors = <&pm8150l_adc_tm ADC_AMUX_THM3_PU2>;
+		trips {
+			active-config0 {
+				temperature = <125000>;
+				hysteresis = <1000>;
+				type = "passive";
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index c34bfa7..66feda6 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -33,6 +33,7 @@
 
 	aliases {
 		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
+		sdhc2 = &sdhc_2; /* SDC2 SD card slot */
 		pci-domain2 = &pcie2; /* PCIe2 domain */
 		serial0 = &qupv3_se2_2uart; /* RUMI */
 	};
@@ -871,11 +872,19 @@
 		reg = <0x18590000 0x4>, <0x18590100 0xa0>, <0x18590320 0x4>;
 		reg-names = "en-base", "ftbl-base", "perf-base";
 
-		qcom,cpu0-l3 {
+		cpu0_l3: qcom,cpu0-cpu-l3-lat {
 			compatible = "qcom,devfreq-fw-voter";
 		};
 
-		qcom,cpu4-l3 {
+		cpu4_l3: qcom,cpu4-cpu-l3-lat {
+			compatible = "qcom,devfreq-fw-voter";
+		};
+
+		cpu7_l3: qcom,cpu7-cpu-l3-lat {
+			compatible = "qcom,devfreq-fw-voter";
+		};
+
+		cdsp_l3: qcom,cdsp-cdsp-l3-lat {
 			compatible = "qcom,devfreq-fw-voter";
 		};
 	};
@@ -907,6 +916,7 @@
 		BW_OPP_ENTRY( 1017, 4); /*  3879 MB/s */
 		BW_OPP_ENTRY( 1353, 4); /*  5161 MB/s */
 		BW_OPP_ENTRY( 1555, 4); /*  5931 MB/s */
+		BW_OPP_ENTRY( 1804, 4); /*  6881 MB/s */
 		BW_OPP_ENTRY( 2092, 4); /*  7980 MB/s */
 		BW_OPP_ENTRY( 2736, 4); /* 10437 MB/s */
 	};
@@ -923,10 +933,273 @@
 		BW_OPP_ENTRY( 1017, 4); /*  3879 MB/s */
 		BW_OPP_ENTRY( 1353, 4); /*  5161 MB/s */
 		BW_OPP_ENTRY( 1555, 4); /*  5931 MB/s */
+		BW_OPP_ENTRY( 1804, 4); /*  6881 MB/s */
 		BW_OPP_ENTRY( 2092, 4); /*  7980 MB/s */
 		BW_OPP_ENTRY( 2736, 4); /* 10437 MB/s */
 	};
 
+	llcc_pmu: llcc-pmu@9090000 {
+		compatible = "qcom,qcom-llcc-pmu";
+		reg = <0x09090000 0x300>;
+		reg-names = "lagg-base";
+	};
+
+	cpu_cpu_llcc_bw: qcom,cpu-cpu-llcc-bw {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports =
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_LLCC>;
+		qcom,active-only;
+		operating-points-v2 = <&llcc_bw_opp_table>;
+	};
+
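+	/*
+	 * Hardware bandwidth monitor: counts CPU-to-LLCC traffic in units of
+	 * qcom,count-unit bytes (64 KiB per count here) and drives the devbw
+	 * voter above.
+	 */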
+	cpu_cpu_llcc_bwmon: qcom,cpu-cpu-llcc-bwmon@90b6400 {
+		compatible = "qcom,bimc-bwmon4";
+		reg = <0x90b6400 0x300>, <0x90b6300 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,mport = <0>;
+		qcom,hw-timer-hz = <19200000>;
+		qcom,target-dev = <&cpu_cpu_llcc_bw>;
+		qcom,count-unit = <0x10000>;
+	};
+
+	cpu_llcc_ddr_bw: qcom,cpu-llcc-ddr-bw {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports =
+			<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0>;
+		qcom,active-only;
+		operating-points-v2 = <&ddr_bw_opp_table>;
+	};
+
+	cpu_llcc_ddr_bwmon: qcom,cpu-llcc-ddr-bwmon@9091000 {
+		compatible = "qcom,bimc-bwmon5";
+		reg = <0x9091000 0x1000>;
+		reg-names = "base";
+		interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,hw-timer-hz = <19200000>;
+		qcom,target-dev = <&cpu_llcc_ddr_bw>;
+		qcom,count-unit = <0x10000>;
+	};
+
+	npu_npu_ddr_bw: qcom,npu-npu-ddr-bw {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports = <MSM_BUS_MASTER_NPU MSM_BUS_SLAVE_EBI_CH0>;
+		operating-points-v2 = <&suspendable_ddr_bw_opp_table>;
+	};
+
+	npu_npu_ddr_bwmon: qcom,npu-npu-ddr-bwmon@60300 {
+		compatible = "qcom,bimc-bwmon4";
+		reg = <0x00060300 0x300>, <0x00060400 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,mport = <0>;
+		qcom,hw-timer-hz = <19200000>;
+		qcom,target-dev = <&npu_npu_ddr_bw>;
+		qcom,count-unit = <0x10000>;
+	};
+
+	npu_npu_ddr_bwmon_dsp: qcom,npu-npu-ddr-bwmon_dsp@70200 {
+		compatible = "qcom,bimc-bwmon4";
+		reg = <0x00070200 0x300>, <0x00070300 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,mport = <0>;
+		qcom,hw-timer-hz = <19200000>;
+		qcom,target-dev = <&npu_npu_ddr_bw>;
+		qcom,count-unit = <0x10000>;
+	};
+
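+	/* memlat monitors: each core-dev-table row maps a CPU frequency (kHz) to an L3 clock vote (Hz) */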
+	cpu0_cpu_l3_latmon: qcom,cpu0-cpu-l3-latmon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,target-dev = <&cpu0_l3>;
+		qcom,cachemiss-ev = <0x17>;
+		qcom,core-dev-table =
+			<  300000  300000000 >,
+			<  403200  403200000 >,
+			<  518400  518400000 >,
+			<  633600  614400000 >,
+			<  825600  729600000 >,
+			<  921600  825600000 >,
+			< 1036800  921600000 >,
+			< 1132800 1036800000 >,
+			< 1228800 1132800000 >,
+			< 1401600 1228800000 >,
+			< 1497600 1305600000 >,
+			< 1670400 1382400000 >;
+	};
+
+	cpu4_cpu_l3_latmon: qcom,cpu4-cpu-l3-latmon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU4 &CPU5 &CPU6>;
+		qcom,target-dev = <&cpu4_l3>;
+		qcom,cachemiss-ev = <0x17>;
+		qcom,core-dev-table =
+			<  300000  300000000 >,
+			<  806400  614400000 >,
+			< 1017600  729600000 >,
+			< 1228800  921600000 >,
+			< 1689600 1228800000 >,
+			< 1804800 1305600000 >,
+			< 2227200 1382400000 >;
+	};
+
+	cpu7_cpu_l3_latmon: qcom,cpu7-cpu-l3-latmon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU7>;
+		qcom,target-dev = <&cpu7_l3>;
+		qcom,cachemiss-ev = <0x17>;
+		qcom,core-dev-table =
+			<  300000  300000000 >,
+			<  806400  614400000 >,
+			< 1017600  729600000 >,
+			< 1228800  921600000 >,
+			< 1689600 1228800000 >,
+			< 1804800 1305600000 >,
+			< 2227200 1382400000 >;
+	};
+
+	cpu0_cpu_llcc_lat: qcom,cpu0-cpu-llcc-lat {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports =
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_LLCC>;
+		qcom,active-only;
+		operating-points-v2 = <&llcc_bw_opp_table>;
+	};
+
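+	/*
+	 * For the LLCC/DDR latency monitors below, core-dev-table maps a CPU
+	 * frequency (kHz) to a bandwidth vote; MHZ_TO_MBPS(mhz, width)
+	 * converts a DDR rate at the given byte width to MB/s.
+	 */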
+	cpu0_cpu_llcc_latmon: qcom,cpu0-cpu-llcc-latmon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,target-dev = <&cpu0_cpu_llcc_lat>;
+		qcom,cachemiss-ev = <0x2A>;
+		qcom,core-dev-table =
+			<  300000 MHZ_TO_MBPS( 150, 16) >,
+			<  729600 MHZ_TO_MBPS( 300, 16) >,
+			< 1497600 MHZ_TO_MBPS( 466, 16) >,
+			< 1670400 MHZ_TO_MBPS( 600, 16) >;
+	};
+
+	cpu4_cpu_llcc_lat: qcom,cpu4-cpu-llcc-lat {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports =
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_LLCC>;
+		qcom,active-only;
+		operating-points-v2 = <&llcc_bw_opp_table>;
+	};
+
+	cpu4_cpu_llcc_latmon: qcom,cpu4-cpu-llcc-latmon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,target-dev = <&cpu4_cpu_llcc_lat>;
+		qcom,cachemiss-ev = <0x2A>;
+		qcom,core-dev-table =
+			<  300000 MHZ_TO_MBPS(  150, 16) >,
+			<  691200 MHZ_TO_MBPS(  300, 16) >,
+			< 1017600 MHZ_TO_MBPS(  466, 16) >,
+			< 1228800 MHZ_TO_MBPS(  600, 16) >,
+			< 1804800 MHZ_TO_MBPS(  806, 16) >,
+			< 2227200 MHZ_TO_MBPS(  933, 16) >,
+			< 2476800 MHZ_TO_MBPS( 1000, 16) >;
+	};
+
+	cpu0_llcc_ddr_lat: qcom,cpu0-llcc-ddr-lat {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports =
+			<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0>;
+		qcom,active-only;
+		operating-points-v2 = <&ddr_bw_opp_table>;
+	};
+
+	cpu0_llcc_ddr_latmon: qcom,cpu0-llcc-ddr-latmon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,target-dev = <&cpu0_llcc_ddr_lat>;
+		qcom,cachemiss-ev = <0x1000>;
+		qcom,core-dev-table =
+			<  300000 MHZ_TO_MBPS(  200, 4) >,
+			<  729600 MHZ_TO_MBPS(  451, 4) >,
+			< 1132800 MHZ_TO_MBPS(  547, 4) >,
+			< 1497600 MHZ_TO_MBPS(  768, 4) >,
+			< 1670400 MHZ_TO_MBPS( 1017, 4) >;
+	};
+
+	cpu4_llcc_ddr_lat: qcom,cpu4-llcc-ddr-lat {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports =
+			<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0>;
+		qcom,active-only;
+		operating-points-v2 = <&ddr_bw_opp_table>;
+	};
+
+	cpu4_llcc_ddr_latmon: qcom,cpu4-llcc-ddr-latmon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,target-dev = <&cpu4_llcc_ddr_lat>;
+		qcom,cachemiss-ev = <0x1000>;
+		qcom,core-dev-table =
+			<  300000 MHZ_TO_MBPS( 200, 4) >,
+			<  691200 MHZ_TO_MBPS( 451, 4) >,
+			<  806400 MHZ_TO_MBPS( 547, 4) >,
+			< 1017600 MHZ_TO_MBPS( 768, 4) >,
+			< 1228800 MHZ_TO_MBPS(1017, 4) >,
+			< 1574400 MHZ_TO_MBPS(1353, 4) >,
+			< 1804800 MHZ_TO_MBPS(1555, 4) >,
+			< 2227200 MHZ_TO_MBPS(1804, 4) >,
+			< 2380800 MHZ_TO_MBPS(2092, 4) >,
+			< 2476800 MHZ_TO_MBPS(2736, 4) >;
+	};
+
+	cpu4_cpu_ddr_latfloor: qcom,cpu4-cpu-ddr-latfloor {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports =
+			<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0>;
+		qcom,active-only;
+		operating-points-v2 = <&ddr_bw_opp_table>;
+	};
+
+	cpu4_computemon: qcom,cpu4-computemon {
+		compatible = "qcom,arm-cpu-mon";
+		qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,target-dev = <&cpu4_cpu_ddr_latfloor>;
+		qcom,core-dev-table =
+			< 1804800 MHZ_TO_MBPS( 200, 4) >,
+			< 2380800 MHZ_TO_MBPS(1017, 4) >,
+			< 2500000 MHZ_TO_MBPS(2736, 4) >;
+	};
+
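+	/*
+	 * Keep-alive voters: a minimal 1 Hz OPP under the "powersave"
+	 * governor keeps these bus paths voted on while otherwise idle.
+	 */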
+	keepalive_opp_table: keepalive-opp-table {
+		compatible = "operating-points-v2";
+		opp-1 {
+		opp-hz = /bits/ 64 <1>;
+		};
+	};
+
+	snoc_cnoc_keepalive: qcom,snoc_cnoc_keepalive {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 627>;
+		qcom,active-only;
+		status = "ok";
+		operating-points-v2 = <&keepalive_opp_table>;
+	};
+
+	cdsp_keepalive: qcom,cdsp_keepalive {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <154 10070>;
+		qcom,active-only;
+		status = "ok";
+		operating-points-v2 = <&keepalive_opp_table>;
+	};
+
 	qcom,msm-imem@146bf000 {
 		compatible = "qcom,msm-imem";
 		reg = <0x146bf000 0x1000>;
@@ -998,6 +1271,7 @@
 		qcom,support-fde;
 		qcom,no-clock-support;
 		qcom,fde-key-size;
+		qcom,appsbl-qseecom-support;
 		qcom,commonlib64-loaded-by-uefi;
 		qcom,qsee-reentrancy-support = <2>;
 	};
@@ -1544,6 +1818,68 @@
 		status = "disabled";
 	};
 
+	sdhc_2: sdhci@8804000 {
+		compatible = "qcom,sdhci-msm-v5";
+		reg = <0x8804000 0x1000>;
+		reg-names = "hc_mem";
+
+		interrupts = <0 204 0>, <0 222 0>;
+		interrupt-names = "hc_irq", "pwr_irq";
+
+		qcom,bus-width = <4>;
+		qcom,large-address-bus;
+
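+		/*
+		 * Bus scaling: each case below carries one <master slave ab ib>
+		 * vote (in KB/s) per path, matching num-paths = <2>.
+		 */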
+		qcom,msm-bus,name = "sdhc2";
+		qcom,msm-bus,num-cases = <8>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+			/* No vote */
+			<81 512 0 0>, <1 608 0 0>,
+			/* 400 KB/s */
+			<81 512 1046 1600>,
+			<1 608 1600 1600>,
+			/* 20 MB/s */
+			<81 512 52286 80000>,
+			<1 608 80000 80000>,
+			/* 25 MB/s */
+			<81 512 65360 100000>,
+			<1 608 100000 100000>,
+			/* 50 MB/s */
+			<81 512 130718 200000>,
+			<1 608 133320 133320>,
+			/* 100 MB/s */
+			<81 512 261438 200000>,
+			<1 608 150000 150000>,
+			/* 200 MB/s */
+			<81 512 261438 400000>,
+			<1 608 300000 300000>,
+			/* Max. bandwidth */
+			<81 512 1338562 4096000>,
+			<1 608 1338562 4096000>;
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+			100750000 200000000 4294967295>;
+
+		qcom,restore-after-cx-collapse;
+
+		qcom,clk-rates = <400000 20000000 25000000
+					50000000 100000000 201500000>;
+		qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50",
+				      "SDR104";
+
+		qcom,devfreq,freq-table = <50000000 201500000>;
+		clocks = <&clock_gcc GCC_SDCC2_AHB_CLK>,
+			<&clock_gcc GCC_SDCC2_APPS_CLK>;
+		clock-names = "iface_clk", "core_clk";
+
+		/* PM QoS */
+		qcom,pm-qos-irq-type = "affine_irq";
+		qcom,pm-qos-irq-latency = <44 44>;
+		qcom,pm-qos-cpu-groups = <0x3f 0xc0>;
+		qcom,pm-qos-legacy-latency-us = <44 44>, <44 44>;
+
+		status = "disabled";
+	};
+
 	ipcc_mproc: qcom,ipcc@408000 {
 		compatible = "qcom,ipcc";
 		reg = <0x408000 0x1000>;
@@ -2304,6 +2640,10 @@
 		};
 	};
 
+	qcom_msmhdcp: qcom,msm_hdcp {
+		compatible = "qcom,msm-hdcp";
+	};
+
 	mem_dump {
 		compatible = "qcom,mem-dump";
 		memory-region = <&dump_mem>;
@@ -2831,7 +3171,6 @@
 #include "kona-coresight.dtsi"
 #include "kona-sde.dtsi"
 #include "kona-sde-pll.dtsi"
-#include "kona-sde-display.dtsi"
 
 #include "kona-pm.dtsi"
 
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index 0209668..ac61470 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -6,6 +6,7 @@
 #include <dt-bindings/input/qcom,qpnp-power-on.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
 
 &spmi_bus {
 	#address-cells = <2>;
@@ -87,6 +88,50 @@
 			compatible = "qcom,pm8941-rtc";
 			interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>;
 		};
+
+		pm8150_vadc: vadc@3100 {
+			compatible = "qcom,spmi-adc5";
+			reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-vdd-reference = <1875>;
+			#io-channel-cells = <1>;
+			io-channel-ranges;
+
+			/* Channel node */
+			ref_gnd@0 {
+				reg = <ADC_REF_GND>;
+				label = "ref_gnd";
+				qcom,pre-scaling = <1 1>;
+			};
+
+			vref_1p25@1 {
+				reg = <ADC_1P25VREF>;
+				label = "vref_1p25";
+				qcom,pre-scaling = <1 1>;
+			};
+
+			die_temp@2 {
+				reg = <ADC_DIE_TEMP>;
+				label = "die_temp";
+				qcom,pre-scaling = <1 1>;
+			};
+		};
+
+		pm8150_adc_tm: adc_tm@3500 {
+			compatible = "qcom,adc-tm5";
+			reg = <0x3500 0x100>;
+			interrupts = <0x0 0x35 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "thr-int-en";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			#thermal-sensor-cells = <1>;
+			io-channels = <&pm8150_vadc ADC_XO_THERM_PU2>,
+					<&pm8150_vadc ADC_AMUX_THM1_PU2>,
+					<&pm8150_vadc ADC_AMUX_THM2_PU2>;
+		};
 	};
 
 	qcom,pm8150@1 {
diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
index cb423b7..7b3287f 100644
--- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
@@ -3,6 +3,7 @@
 
 #include <dt-bindings/spmi/spmi.h>
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
 
 &spmi_bus {
 	#address-cells = <2>;
@@ -332,6 +333,71 @@
 						  "mem-attn";
 			};
 		};
+
+		pm8150b_vadc: vadc@3100 {
+			compatible = "qcom,spmi-adc5";
+			reg = <0x3100 0x100>, <0x3700 0x100>;
+			reg-names = "adc5-usr-base", "adc5-cal-base";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x2 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-vdd-reference = <1875>;
+			#io-channel-cells = <1>;
+			io-channel-ranges;
+
+			/* Channel node */
+			ref_gnd@0 {
+				reg = <ADC_REF_GND>;
+				label = "ref_gnd";
+				qcom,pre-scaling = <1 1>;
+			};
+
+			vref_1p25@1 {
+				reg = <ADC_1P25VREF>;
+				label = "vref_1p25";
+				qcom,pre-scaling = <1 1>;
+			};
+
+			die_temp@2 {
+				reg = <ADC_DIE_TEMP>;
+				label = "die_temp";
+				qcom,pre-scaling = <1 1>;
+			};
+
+			chg_temp@9 {
+				reg = <ADC_CHG_TEMP>;
+				label = "chg_temp";
+				qcom,pre-scaling = <1 1>;
+			};
+
+			smb1390_therm@e {
+				reg = <ADC_AMUX_THM2>;
+				label = "smb1390_therm";
+				qcom,hw-settle-time = <200>;
+				qcom,pre-scaling = <1 1>;
+			};
+
+			smb1355_therm@4e {
+				reg = <ADC_AMUX_THM2_PU2>;
+				label = "smb1355_therm";
+				qcom,ratiometric;
+				qcom,hw-settle-time = <200>;
+				qcom,pre-scaling = <1 1>;
+			};
+		};
+
+		pm8150b_adc_tm: adc_tm@3500 {
+			compatible = "qcom,adc-tm5";
+			reg = <0x3500 0x100>;
+			interrupts = <0x2 0x35 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "thr-int-en";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			#thermal-sensor-cells = <1>;
+			io-channels = <&pm8150b_vadc ADC_AMUX_THM1_PU2>;
+			qcom,pmic-revid = <&pm8150b_revid>;
+		};
 	};
 
 	qcom,pm8150b@3 {
diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
index 5523725..6d8ca09 100644
--- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
@@ -3,6 +3,7 @@
 
 #include <dt-bindings/spmi/spmi.h>
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
 
 &spmi_bus {
 	#address-cells = <2>;
@@ -65,6 +66,50 @@
 			#gpio-cells = <2>;
 			qcom,gpios-disallowed = <2 7 12>;
 		};
+
+		pm8150l_vadc: vadc@3100 {
+			compatible = "qcom,spmi-adc5";
+			reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x4 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-vdd-reference = <1875>;
+			#io-channel-cells = <1>;
+			io-channel-ranges;
+
+			/* Channel node */
+			ref_gnd@0 {
+				reg = <ADC_REF_GND>;
+				label = "ref_gnd";
+				qcom,pre-scaling = <1 1>;
+			};
+
+			vref_1p25@1 {
+				reg = <ADC_1P25VREF>;
+				label = "vref_1p25";
+				qcom,pre-scaling = <1 1>;
+			};
+
+			die_temp@2 {
+				reg = <ADC_DIE_TEMP>;
+				label = "die_temp";
+				qcom,pre-scaling = <1 1>;
+			};
+		};
+
+		pm8150l_adc_tm: adc_tm@3500 {
+			compatible = "qcom,adc-tm5";
+			reg = <0x3500 0x100>;
+			interrupts = <0x4 0x35 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "thr-int-en";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			#thermal-sensor-cells = <1>;
+			io-channels = <&pm8150l_vadc ADC_AMUX_THM1_PU2>,
+					<&pm8150l_vadc ADC_AMUX_THM2_PU2>,
+					<&pm8150l_vadc ADC_AMUX_THM3_PU2>;
+		};
 	};
 
 	qcom,pm8150l@5 {
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 5595845..aa30841 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -310,6 +310,7 @@
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QCOM_GENI=y
 CONFIG_SPI=y
+CONFIG_SPI_QCOM_GENI=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_SPMI=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
@@ -350,6 +351,7 @@
 CONFIG_VIDEO_ADV_DEBUG=y
 CONFIG_VIDEO_FIXED_MINOR_RANGES=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SPECTRA_CAMERA=y
 CONFIG_MSM_CVP_V4L2=y
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
@@ -404,8 +406,12 @@
 CONFIG_USB_PD_POLICY=y
 CONFIG_QPNP_USB_PDPHY=y
 CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
 CONFIG_MMC_TEST=y
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
@@ -498,11 +504,20 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
 CONFIG_PWM=y
 CONFIG_PWM_QTI_LPG=y
 CONFIG_QCOM_PDC=y
+CONFIG_QCOM_LLCC_PMU=y
 CONFIG_RAS=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index bbb9e82..c49671e 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -319,6 +319,7 @@
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_QCOM_GENI=y
 CONFIG_SPI=y
+CONFIG_SPI_QCOM_GENI=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_SPMI=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
@@ -359,6 +360,7 @@
 CONFIG_VIDEO_ADV_DEBUG=y
 CONFIG_VIDEO_FIXED_MINOR_RANGES=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SPECTRA_CAMERA=y
 CONFIG_MSM_CVP_V4L2=y
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
@@ -414,8 +416,12 @@
 CONFIG_USB_PD_POLICY=y
 CONFIG_QPNP_USB_PDPHY=y
 CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
 CONFIG_MMC_TEST=y
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_MSM=y
@@ -503,6 +509,8 @@
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_BUS_SCALING=y
 CONFIG_QCOM_BUS_CONFIG_RPMH=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
 CONFIG_QSEE_IPC_IRQ_BRIDGE=y
 CONFIG_QCOM_GLINK=y
 CONFIG_QCOM_GLINK_PKT=y
@@ -513,13 +521,21 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
 CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_IIO=y
 CONFIG_QCOM_SPMI_ADC5=y
 CONFIG_PWM=y
 CONFIG_PWM_QTI_LPG=y
 CONFIG_QCOM_PDC=y
 CONFIG_PHY_XGENE=y
+CONFIG_QCOM_LLCC_PMU=y
 CONFIG_RAS=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index d87b9eb..48d6061 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -251,13 +251,13 @@
 /*
  * io{read,write}{16,32,64}be() macros
  */
-#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
-#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-#define ioread64be(p)		({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; })
+#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw_no_log(p)); __iormb(); __v; })
+#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl_no_log(p)); __iormb(); __v; })
+#define ioread64be(p)		({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq_no_log(p)); __iormb(); __v; })
 
-#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
-#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
-#define iowrite64be(v,p)	({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
+#define iowrite16be(v,p)	({ __iowmb(); __raw_writew_no_log((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p)	({ __iowmb(); __raw_writel_no_log((__force __u32)cpu_to_be32(v), p); })
+#define iowrite64be(v,p)	({ __iowmb(); __raw_writeq_no_log((__force __u64)cpu_to_be64(v), p); })
 
 #include <asm-generic/io.h>
 
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index f93e7f1..49e0909 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -2107,6 +2107,8 @@
 		if (!strcmp(proc_name, "audiopd")) {
 			fl->spdname = AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME;
 			VERIFY(err, !fastrpc_mmap_remove_pdr(fl));
+			if (err)
+				goto bail;
 		}
 
 		if (!me->staticpd_flags && !(me->legacy_remote_heap)) {
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index 5a7910b..b5c34d1 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -339,8 +339,8 @@
 		DIAG_LOG(DIAG_DEBUG_MUX, "partial write_done ref %d\n",
 			 atomic_read(&entry->ref_count));
 		diag_ws_on_copy_complete(DIAG_WS_MUX);
-		spin_unlock_irqrestore(&ch->write_lock, flags);
 		diagmem_free(driver, req, ch->mempool);
+		spin_unlock_irqrestore(&ch->write_lock, flags);
 		return;
 	}
 	DIAG_LOG(DIAG_DEBUG_MUX, "full write_done, ctxt: %d\n",
@@ -357,8 +357,8 @@
 	buf = NULL;
 	len = 0;
 	ctxt = 0;
-	spin_unlock_irqrestore(&ch->write_lock, flags);
 	diagmem_free(driver, req, ch->mempool);
+	spin_unlock_irqrestore(&ch->write_lock, flags);
 }
 
 static void diag_usb_notifier(void *priv, unsigned int event,
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index cabae13..ecdf41f 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -604,6 +604,7 @@
 	unsigned int poolsize_hdlc;
 	unsigned int poolsize_dci;
 	unsigned int poolsize_user;
+	spinlock_t diagmem_lock;
 	/* Buffers for masks */
 	struct mutex diag_cntl_mutex;
 	/* Members for Sending response */
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 6e5b656..670a527 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -183,6 +183,7 @@
 	if ((count < ret+length) || (copy_to_user(buf,		\
 			(void *)&data, length))) {		\
 		ret = -EFAULT;					\
+		break;							\
 	}							\
 	ret += length;						\
 } while (0)
@@ -195,17 +196,19 @@
 static void diag_drain_apps_data(struct diag_apps_data_t *data)
 {
 	int err = 0;
+	unsigned long flags;
 
 	if (!data || !data->buf)
 		return;
 
 	err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
 			     data->ctxt);
+	spin_lock_irqsave(&driver->diagmem_lock, flags);
 	if (err)
 		diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
-
 	data->buf = NULL;
 	data->len = 0;
+	spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 }
 
 void diag_update_user_client_work_fn(struct work_struct *work)
@@ -276,6 +279,8 @@
 	diagmem_init(driver, POOL_TYPE_HDLC);
 	diagmem_init(driver, POOL_TYPE_USER);
 	diagmem_init(driver, POOL_TYPE_DCI);
+
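+	/* diagmem_lock serialises HDLC-pool frees with resets of the shared apps-data buffers */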
+	spin_lock_init(&driver->diagmem_lock);
 }
 
 static void diag_mempool_exit(void)
@@ -2872,6 +2877,7 @@
 	struct diag_apps_data_t *data = &hdlc_data;
 	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
 	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	unsigned long flags;
 	/*
 	 * The maximum encoded size of the buffer can be at most twice the length
 	 * of the packet. Add three bytes for footer - 16 bit CRC (2 bytes) +
@@ -2976,10 +2982,11 @@
 	return PKT_ALLOC;
 
 fail_free_buf:
+	spin_lock_irqsave(&driver->diagmem_lock, flags);
 	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
 	data->buf = NULL;
 	data->len = 0;
-
+	spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 fail_ret:
 	return ret;
 }
@@ -2991,6 +2998,7 @@
 	int ret = PKT_DROP;
 	struct diag_pkt_frame_t header;
 	struct diag_apps_data_t *data = &non_hdlc_data;
+	unsigned long flags;
 	/*
 	 * The maximum packet size, when the data is non hdlc encoded is equal
 	 * to the size of the packet frame header and the length. Add 1 for the
@@ -3055,10 +3063,11 @@
 	return PKT_ALLOC;
 
 fail_free_buf:
+	spin_lock_irqsave(&driver->diagmem_lock, flags);
 	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
 	data->buf = NULL;
 	data->len = 0;
-
+	spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 fail_ret:
 	return ret;
 }
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 5f130ea..c4c2e6d 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
  */
 #include <linux/slab.h>
 #include <linux/init.h>
@@ -1777,9 +1777,11 @@
 			diagfwd_write_done(peripheral, type, num);
 			diag_ws_on_copy(DIAG_WS_MUX);
 		} else if (peripheral == APPS_DATA) {
+			spin_lock_irqsave(&driver->diagmem_lock, flags);
 			diagmem_free(driver, (unsigned char *)buf,
 				     POOL_TYPE_HDLC);
 			buf = NULL;
+			spin_unlock_irqrestore(&driver->diagmem_lock, flags);
 		} else {
 			pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
 					   peripheral, __func__, type);
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
index 64da6e6..0094a84 100644
--- a/drivers/char/diag/diagmem.c
+++ b/drivers/char/diag/diagmem.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2008-2014, 2016-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, 2016-2019 The Linux Foundation. All rights reserved.
  */
 
 #include <linux/init.h>
@@ -212,7 +212,7 @@
 			break;
 		}
 		spin_lock_irqsave(&mempool->lock, flags);
-		if (mempool->count > 0) {
+		if (mempool->count > 0 && buf) {
 			mempool_free(buf, mempool->pool);
 			atomic_add(-1, (atomic_t *)&mempool->count);
 		} else {
diff --git a/drivers/clk/qcom/gcc-lito.c b/drivers/clk/qcom/gcc-lito.c
index aded450..3ed2867 100644
--- a/drivers/clk/qcom/gcc-lito.c
+++ b/drivers/clk/qcom/gcc-lito.c
@@ -374,7 +374,7 @@
 	},
 };
 
-static struct clk_init_data gcc_npu_dma_clk_init = {
+static struct clk_init_data gcc_npu_dma_clk_src_init = {
 	.name = "gcc_npu_dma_clk_src",
 	.parent_names = gcc_parent_names_3,
 	.num_parents = 7,
@@ -394,7 +394,7 @@
 	.mnd_width = 0,
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_3,
-	.clkr.hw.init = &gcc_npu_dma_clk_init,
+	.clkr.hw.init = &gcc_npu_dma_clk_src_init,
 };
 
 static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
@@ -444,7 +444,7 @@
 	{ }
 };
 
-static struct clk_init_data gcc_qupv3_wrap0_s0_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
 	.name = "gcc_qupv3_wrap0_s0_clk_src",
 	.parent_names = gcc_parent_names_0,
 	.num_parents = 4,
@@ -463,10 +463,10 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
-	.clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_init,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
 };
 
-static struct clk_init_data gcc_qupv3_wrap0_s1_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
 	.name = "gcc_qupv3_wrap0_s1_clk_src",
 	.parent_names = gcc_parent_names_0,
 	.num_parents = 4,
@@ -485,10 +485,10 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
-	.clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_init,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
 };
 
-static struct clk_init_data gcc_qupv3_wrap0_s2_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
 	.name = "gcc_qupv3_wrap0_s2_clk_src",
 	.parent_names = gcc_parent_names_0,
 	.num_parents = 4,
@@ -507,10 +507,10 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
-	.clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_init,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
 };
 
-static struct clk_init_data gcc_qupv3_wrap0_s3_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
 	.name = "gcc_qupv3_wrap0_s3_clk_src",
 	.parent_names = gcc_parent_names_0,
 	.num_parents = 4,
@@ -529,10 +529,10 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
-	.clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_init,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
 };
 
-static struct clk_init_data gcc_qupv3_wrap0_s4_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
 	.name = "gcc_qupv3_wrap0_s4_clk_src",
 	.parent_names = gcc_parent_names_0,
 	.num_parents = 4,
@@ -551,10 +551,10 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
-	.clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_init,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
 };
 
-static struct clk_init_data gcc_qupv3_wrap0_s5_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
 	.name = "gcc_qupv3_wrap0_s5_clk_src",
 	.parent_names = gcc_parent_names_0,
 	.num_parents = 4,
@@ -573,10 +573,10 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
-	.clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_init,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
 };
 
-static struct clk_init_data gcc_qupv3_wrap1_s0_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
 	.name = "gcc_qupv3_wrap1_s0_clk_src",
 	.parent_names = gcc_parent_names_0,
 	.num_parents = 4,
@@ -595,10 +595,10 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
-	.clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_init,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
 };
 
-static struct clk_init_data gcc_qupv3_wrap1_s1_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
 	.name = "gcc_qupv3_wrap1_s1_clk_src",
 	.parent_names = gcc_parent_names_0,
 	.num_parents = 4,
@@ -617,10 +617,10 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
-	.clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_init,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
 };
 
-static struct clk_init_data gcc_qupv3_wrap1_s2_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
 	.name = "gcc_qupv3_wrap1_s2_clk_src",
 	.parent_names = gcc_parent_names_0,
 	.num_parents = 4,
@@ -639,10 +639,10 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
-	.clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_init,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
 };
 
-static struct clk_init_data gcc_qupv3_wrap1_s3_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
 	.name = "gcc_qupv3_wrap1_s3_clk_src",
 	.parent_names = gcc_parent_names_0,
 	.num_parents = 4,
@@ -661,10 +661,10 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
-	.clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_init,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
 };
 
-static struct clk_init_data gcc_qupv3_wrap1_s4_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
 	.name = "gcc_qupv3_wrap1_s4_clk_src",
 	.parent_names = gcc_parent_names_0,
 	.num_parents = 4,
@@ -683,10 +683,10 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
-	.clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_init,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
 };
 
-static struct clk_init_data gcc_qupv3_wrap1_s5_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
 	.name = "gcc_qupv3_wrap1_s5_clk_src",
 	.parent_names = gcc_parent_names_0,
 	.num_parents = 4,
@@ -705,7 +705,7 @@
 	.hid_width = 5,
 	.parent_map = gcc_parent_map_0,
 	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
-	.clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_init,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
 };
 
 static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
@@ -2699,19 +2699,19 @@
 };
 
 static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
-	DEFINE_RCG_DFS(gcc_npu_dma_clk),
-	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk),
-	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk),
-	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk),
-	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk),
-	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk),
-	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk),
-	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk),
-	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk),
-	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk),
-	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk),
-	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk),
-	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk),
+	DEFINE_RCG_DFS(gcc_npu_dma_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
 };
 
 static const struct regmap_config gcc_lito_regmap_config = {
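
The _clk_src_init renames above keep each clk_init_data name in sync with
what the DEFINE_RCG_DFS() table expects. A sketch of the macro, assuming this
tree follows the upstream drivers/clk/qcom/clk-rcg.h form that token-pastes
"_init" onto its argument:

#define DEFINE_RCG_DFS(r) \
	{ .rcg = &r, .init = &r##_init }

/* so DEFINE_RCG_DFS(gcc_npu_dma_clk_src) resolves both references: */
{ .rcg = &gcc_npu_dma_clk_src, .init = &gcc_npu_dma_clk_src_init }
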
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 5376286..ee19c03 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -36,6 +36,7 @@
 
 	struct gpio_desc *id_gpiod;
 	struct gpio_desc *vbus_gpiod;
+	struct gpio_desc *vbus_out_gpiod;
 	int id_irq;
 	int vbus_irq;
 
@@ -80,12 +81,17 @@
 		gpiod_get_value_cansleep(info->vbus_gpiod) : id;
 
 	/* at first we clean states which are no longer active */
-	if (id)
+	if (id) {
+		if (info->vbus_out_gpiod)
+			gpiod_set_value_cansleep(info->vbus_out_gpiod, 0);
 		extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
+	}
 	if (!vbus)
 		extcon_set_state_sync(info->edev, EXTCON_USB, false);
 
 	if (!id) {
+		if (info->vbus_out_gpiod)
+			gpiod_set_value_cansleep(info->vbus_out_gpiod, 1);
 		extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
 	} else {
 		if (vbus)
@@ -121,6 +127,8 @@
 	info->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id", GPIOD_IN);
 	info->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
 						   GPIOD_IN);
+	info->vbus_out_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus-out",
+						   GPIOD_OUT_HIGH);
 
 	if (!info->id_gpiod && !info->vbus_gpiod) {
 		dev_err(dev, "failed to get gpios\n");
@@ -133,6 +141,9 @@
 	if (IS_ERR(info->vbus_gpiod))
 		return PTR_ERR(info->vbus_gpiod);
 
+	if (IS_ERR(info->vbus_out_gpiod))
+		return PTR_ERR(info->vbus_out_gpiod);
+
 	info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
 	if (IS_ERR(info->edev)) {
 		dev_err(dev, "failed to allocate extcon device\n");
@@ -194,7 +205,7 @@
 	}
 
 	platform_set_drvdata(pdev, info);
-	device_set_wakeup_capable(&pdev->dev, true);
+	device_init_wakeup(&pdev->dev, true);
 
 	/* Perform initial detection */
 	usb_extcon_detect_cable(&info->wq_detcable.work);
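
The switch from device_set_wakeup_capable() to device_init_wakeup() above
does more than mark the device wakeup-capable: it also enables wakeup by
default, so the id/vbus interrupts can resume the system. In the driver core,
device_init_wakeup(&pdev->dev, true) is roughly:

	device_set_wakeup_capable(&pdev->dev, true);
	device_wakeup_enable(&pdev->dev);
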
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index cb88528e..65f3eaf 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,6 +9,7 @@
 	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
 	select DRM_PANEL_ORIENTATION_QUIRKS
 	select HDMI
+	select FB
 	select FB_CMDLINE
 	select I2C
 	select I2C_ALGOBIT
@@ -93,7 +94,7 @@
 	depends on DRM
 	select DRM_KMS_HELPER
 	select DRM_KMS_FB_HELPER
-	default y
+	default n
 	help
 	  Choose this option if you have a need for the legacy fbdev
 	  support. Note that this support also provides the linux console
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 7d670ef..94093bc 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -33,7 +33,7 @@
 
 config DRM_MSM_DP
 	bool "Enable Display Port"
-	depends on DRM_MSM_SDE
+	depends on DRM_MSM_SDE && USB_PD
 	default n
 	help
 	  This option enables compilation of Display Port
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 36f2a32..9125a7a 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
@@ -1858,6 +1858,7 @@
 			phandle, 0);
 	if (!dp->aux_switch_node) {
 		pr_warn("cannot parse %s handle\n", phandle);
+		rc = -ENODEV;
 		goto end;
 	}
 
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 6df888c..cef7129 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1787,8 +1787,8 @@
 				DRIVER_MODESET,
 	.open               = msm_open,
 	.preclose           = msm_preclose,
-	.postclose           = msm_postclose,
-	.lastclose          = drm_fb_helper_lastclose,
+	.postclose          = msm_postclose,
+	.lastclose          = msm_lastclose,
 	.irq_handler        = msm_irq,
 	.irq_preinstall     = msm_irq_preinstall,
 	.irq_postinstall    = msm_irq_postinstall,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c2893c5..e9d8241 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -877,6 +877,12 @@
 static inline void __exit msm_edp_unregister(void)
 {
 }
+
+static inline int msm_edp_modeset_init(struct msm_edp *edp,
+		struct drm_device *dev, struct drm_encoder *encoder)
+{
+	return -EINVAL;
+}
 #endif
 
 struct msm_dsi;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 855caaa..8cc39ee 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -79,6 +79,7 @@
 static struct page **get_pages(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct device *aspace_dev;
 
 	if (obj->import_attach)
 		return msm_obj->pages;
@@ -114,9 +115,11 @@
 		 * Make sure to flush the CPU cache for newly allocated memory
 		 * so we don't get ourselves into trouble with a dirty cache
 		 */
-		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
+		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
+			aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
+			dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
 				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		}
 	}
 
 	return msm_obj->pages;
@@ -136,6 +139,7 @@
 
 static void put_pages(struct drm_gem_object *obj)
 {
+	struct device *aspace_dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 	if (msm_obj->pages) {
@@ -144,10 +148,13 @@
 			 * pages are clean because display controller,
 			 * GPU, etc. are not coherent:
 			 */
-			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
+			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
+				aspace_dev =
+				    msm_gem_get_aspace_device(msm_obj->aspace);
+				dma_unmap_sg(aspace_dev, msm_obj->sgt->sgl,
 					     msm_obj->sgt->nents,
 					     DMA_BIDIRECTIONAL);
+			}
 
 			sg_free_table(msm_obj->sgt);
 			kfree(msm_obj->sgt);
@@ -187,6 +194,7 @@
 void msm_gem_sync(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj;
+	struct device *aspace_dev;
 
 	if (!obj)
 		return;
@@ -197,7 +205,8 @@
 	 * dma_sync_sg_for_device synchronises a single contiguous or
 	 * scatter/gather mapping for the CPU and device.
 	 */
-	dma_sync_sg_for_device(obj->dev->dev, msm_obj->sgt->sgl,
+	aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
+	dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
 		       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 }
 
@@ -427,44 +436,9 @@
 
 	if (!vma) {
 		struct page **pages;
-		struct device *dev;
-		struct dma_buf *dmabuf;
-		bool reattach = false;
-
-		/*
-		 * both secure/non-secure domains are attached with the default
-		 * devive (non-sec) with dma_buf_attach during
-		 * msm_gem_prime_import. detach and attach the correct device
-		 * to the dma_buf based on the aspace domain.
-		 */
-		dev = msm_gem_get_aspace_device(aspace);
-		if (dev && obj->import_attach &&
-				(dev != obj->import_attach->dev)) {
-			dmabuf = obj->import_attach->dmabuf;
-
-			DRM_DEBUG("detach nsec-dev:%pK attach sec-dev:%pK\n",
-					 obj->import_attach->dev, dev);
-			SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt);
-
-
-			if (msm_obj->sgt)
-				dma_buf_unmap_attachment(obj->import_attach,
-							msm_obj->sgt,
-							DMA_BIDIRECTIONAL);
-			dma_buf_detach(dmabuf, obj->import_attach);
-
-			obj->import_attach = dma_buf_attach(dmabuf, dev);
-			if (IS_ERR(obj->import_attach)) {
-				DRM_ERROR("dma_buf_attach failure, err=%ld\n",
-						PTR_ERR(obj->import_attach));
-				goto unlock;
-			}
-			reattach = true;
-		}
 
 		/* perform delayed import for buffers without existing sgt */
-		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))
-				|| reattach) {
+		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))) {
 			ret = msm_gem_delayed_import(obj);
 			if (ret) {
 				DRM_ERROR("delayed dma-buf import failed %d\n",
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index c12fc7b..895a94d 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -18,9 +18,12 @@
 
 #include "msm_drv.h"
 #include "msm_gem.h"
+#include "msm_mmu.h"
+#include "msm_kms.h"
 
 #include <linux/dma-buf.h>
 #include <linux/ion.h>
+#include <linux/msm_ion.h>
 
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
@@ -87,13 +90,19 @@
 	struct dma_buf_attachment *attach;
 	struct sg_table *sgt = NULL;
 	struct drm_gem_object *obj;
-	struct device *attach_dev;
+	struct device *attach_dev = NULL;
 	unsigned long flags = 0;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
 	int ret;
+	u32 domain;
 
-	if (!dma_buf)
+	if (!dma_buf || !dev->dev_private)
 		return ERR_PTR(-EINVAL);
 
+	priv = dev->dev_private;
+	kms = priv->kms;
+
 	if (dma_buf->priv && !dma_buf->ops->begin_cpu_access) {
 		obj = dma_buf->priv;
 		if (obj->dev == dev) {
@@ -111,25 +120,37 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	attach_dev = dev->dev;
+	get_dma_buf(dma_buf);
+
+	ret = dma_buf_get_flags(dma_buf, &flags);
+	if (ret) {
+		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
+		goto fail_put;
+	}
+
+	domain = (flags & ION_FLAG_SECURE) ? MSM_SMMU_DOMAIN_SECURE :
+						MSM_SMMU_DOMAIN_UNSECURE;
+	if (kms && kms->funcs->get_address_space_device)
+		attach_dev = kms->funcs->get_address_space_device(
+							kms, domain);
+	if (!attach_dev) {
+		DRM_ERROR("aspace device not found for domain:%d\n", domain);
+		ret = -EINVAL;
+		goto fail_put;
+	}
+
 	attach = dma_buf_attach(dma_buf, attach_dev);
 	if (IS_ERR(attach)) {
 		DRM_ERROR("dma_buf_attach failure, err=%ld\n", PTR_ERR(attach));
 		return ERR_CAST(attach);
 	}
 
-	get_dma_buf(dma_buf);
-
 	/*
 	 * For cached buffers where CPU access is required, dma_map_attachment
 	 * must be called now to allow user-space to perform cpu sync begin/end
 	 * otherwise do delayed mapping during the commit.
 	 */
-	ret = dma_buf_get_flags(dma_buf, &flags);
-	if (ret) {
-		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
-		goto fail_put;
-	} else if (flags & ION_FLAG_CACHED) {
+	if (flags & ION_FLAG_CACHED) {
 		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
 		sgt = dma_buf_map_attachment(
 				attach, DMA_BIDIRECTIONAL);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index dacb1c5..e062b07 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -107,6 +107,9 @@
 	struct msm_gem_address_space *(*get_address_space)(
 			struct msm_kms *kms,
 			unsigned int domain);
+	struct device *(*get_address_space_device)(
+			struct msm_kms *kms,
+			unsigned int domain);
 #ifdef CONFIG_DEBUG_FS
 	/* debugfs: */
 	int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 25cf4d5..f1e9741 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -29,17 +29,9 @@
 #include "msm_mmu.h"
 #include "sde_dbg.h"
 
-#ifndef SZ_4G
-#define SZ_4G	(((size_t) SZ_1G) * 4)
-#endif
-
-#ifndef SZ_2G
-#define SZ_2G	(((size_t) SZ_1G) * 2)
-#endif
-
 struct msm_smmu_client {
 	struct device *dev;
-	struct dma_iommu_mapping *mmu_mapping;
+	struct iommu_domain *domain;
 	bool domain_attached;
 	bool secure;
 };
@@ -52,17 +44,12 @@
 
 struct msm_smmu_domain {
 	const char *label;
-	size_t va_start;
-	size_t va_size;
 	bool secure;
 };
 
 #define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
 #define msm_smmu_to_client(smmu) (smmu->client)
 
-static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
-	const struct msm_smmu_domain *domain);
-
 static int msm_smmu_attach(struct msm_mmu *mmu, const char * const *names,
 		int cnt)
 {
@@ -79,11 +66,9 @@
 	if (client->domain_attached)
 		return 0;
 
-	rc = __depr_arm_iommu_attach_device(client->dev,
-			client->mmu_mapping);
+	rc = iommu_attach_device(client->domain, client->dev);
 	if (rc) {
-		dev_err(client->dev, "iommu attach dev failed (%d)\n",
-				rc);
+		dev_err(client->dev, "iommu attach dev failed (%d)\n", rc);
 		return rc;
 	}
 
@@ -109,7 +94,7 @@
 		return;
 
 	pm_runtime_get_sync(mmu->dev);
-	__depr_arm_iommu_detach_device(client->dev);
+	iommu_detach_device(client->domain, client->dev);
 	pm_runtime_put_sync(mmu->dev);
 
 	client->domain_attached = false;
@@ -123,10 +108,10 @@
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
 	int ret = 0;
 
-	if (!client || !client->mmu_mapping)
+	if (!client || !client->domain)
 		return -ENODEV;
 
-	ret = iommu_domain_set_attr(client->mmu_mapping->domain, attr, data);
+	ret = iommu_domain_set_attr(client->domain, attr, data);
 	if (ret)
 		DRM_ERROR("set domain attribute failed:%d\n", ret);
 
@@ -140,10 +125,10 @@
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
 	int ret = 0;
 
-	if (!client || !client->mmu_mapping)
+	if (!client || !client->domain)
 		return -ENODEV;
 
-	ret = iommu_unmap(client->mmu_mapping->domain, dest_address, size);
+	ret = iommu_unmap(client->domain, dest_address, size);
 	if (ret != size)
 		pr_err("smmu unmap failed\n");
 
@@ -157,10 +142,10 @@
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
 	int ret = 0;
 
-	if (!client || !client->mmu_mapping)
+	if (!client || !client->domain)
 		return -ENODEV;
 
-	ret = iommu_map(client->mmu_mapping->domain, dest_address, dest_address,
+	ret = iommu_map(client->domain, dest_address, dest_address,
 			size, prot);
 	if (ret)
 		pr_err("smmu map failed\n");
@@ -176,13 +161,12 @@
 	size_t ret = 0;
 
 	if (sgt && sgt->sgl) {
-		ret = iommu_map_sg(client->mmu_mapping->domain, iova, sgt->sgl,
+		ret = iommu_map_sg(client->domain, iova, sgt->sgl,
 				sgt->nents, prot);
 		WARN_ON((int)ret < 0);
 		DRM_DEBUG("%pad/0x%x/0x%x/\n", &sgt->sgl->dma_address,
 				sgt->sgl->dma_length, prot);
-		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length,
-				prot);
+		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length, prot);
 	}
 	return (ret == len) ? 0 : -EINVAL;
 }
@@ -194,7 +178,7 @@
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
 
 	pm_runtime_get_sync(mmu->dev);
-	iommu_unmap(client->mmu_mapping->domain, iova, len);
+	iommu_unmap(client->domain, iova, len);
 	pm_runtime_put_sync(mmu->dev);
 
 	return 0;
@@ -304,26 +288,18 @@
 static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
 	[MSM_SMMU_DOMAIN_UNSECURE] = {
 		.label = "mdp_ns",
-		.va_start = SZ_2G,
-		.va_size = SZ_4G - SZ_2G,
 		.secure = false,
 	},
 	[MSM_SMMU_DOMAIN_SECURE] = {
 		.label = "mdp_s",
-		.va_start = SZ_2G,
-		.va_size = SZ_4G - SZ_2G,
 		.secure = true,
 	},
 	[MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
 		.label = "rot_ns",
-		.va_start = SZ_2G,
-		.va_size = SZ_4G - SZ_2G,
 		.secure = false,
 	},
 	[MSM_SMMU_DOMAIN_NRT_SECURE] = {
 		.label = "rot_s",
-		.va_start = SZ_2G,
-		.va_size = SZ_4G - SZ_2G,
 		.secure = true,
 	},
 };
@@ -363,27 +339,6 @@
 	}
 	DRM_DEBUG("found domain %d compat: %s\n", domain, compat);
 
-	if (domain == MSM_SMMU_DOMAIN_UNSECURE) {
-		int rc;
-
-		smmu->client = devm_kzalloc(dev,
-				sizeof(struct msm_smmu_client), GFP_KERNEL);
-		if (!smmu->client)
-			return ERR_PTR(-ENOMEM);
-
-		smmu->client->dev = dev;
-
-		rc = _msm_smmu_create_mapping(msm_smmu_to_client(smmu),
-			msm_smmu_dt_match[i].data);
-		if (rc) {
-			devm_kfree(dev, smmu->client);
-			smmu->client = NULL;
-			return ERR_PTR(rc);
-		}
-
-		return NULL;
-	}
-
 	child = of_find_compatible_node(dev->of_node, NULL, compat);
 	if (!child) {
 		DRM_DEBUG("unable to find compatible node for %s\n", compat);
@@ -407,19 +362,11 @@
 {
 	struct msm_smmu *smmu;
 	struct device *client_dev;
-	bool smmu_full_map;
 
 	smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
 	if (!smmu)
 		return ERR_PTR(-ENOMEM);
 
-	smmu_full_map = of_property_read_bool(dev->of_node,
-					"qcom,fullsize-va-map");
-	if (smmu_full_map) {
-		msm_smmu_domains[domain].va_start = SZ_128K;
-		msm_smmu_domains[domain].va_size = SZ_4G - SZ_128K;
-	}
-
 	client_dev = msm_smmu_device_create(dev, domain, smmu);
 	if (IS_ERR(client_dev)) {
 		kfree(smmu);
@@ -461,62 +408,6 @@
 	return rc;
 }
 
-static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
-	const struct msm_smmu_domain *domain)
-{
-	int rc;
-	int mdphtw_llc_enable = 1;
-
-	client->mmu_mapping = __depr_arm_iommu_create_mapping(
-			&platform_bus_type, domain->va_start, domain->va_size);
-	if (IS_ERR(client->mmu_mapping)) {
-		dev_err(client->dev,
-			"iommu create mapping failed for domain=%s\n",
-			domain->label);
-		return PTR_ERR(client->mmu_mapping);
-	}
-
-	rc = iommu_domain_set_attr(client->mmu_mapping->domain,
-			DOMAIN_ATTR_USE_UPSTREAM_HINT, &mdphtw_llc_enable);
-	if (rc) {
-		dev_err(client->dev, "couldn't enable mdp pagetable walks: %d\n",
-			rc);
-		goto error;
-	}
-
-	if (domain->secure) {
-		int secure_vmid = VMID_CP_PIXEL;
-
-		client->secure = true;
-		rc = iommu_domain_set_attr(client->mmu_mapping->domain,
-				DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
-		if (rc) {
-			dev_err(client->dev, "couldn't set secure pix vmid\n");
-			goto error;
-		}
-	}
-
-	if (!client->dev->dma_parms)
-		client->dev->dma_parms = devm_kzalloc(client->dev,
-				sizeof(*client->dev->dma_parms), GFP_KERNEL);
-
-	dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32));
-	dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64));
-
-	iommu_set_fault_handler(client->mmu_mapping->domain,
-			msm_smmu_fault_handler, (void *)client);
-
-	DRM_INFO("Created domain %s [%zx,%zx] secure=%d\n",
-			domain->label, domain->va_start, domain->va_size,
-			domain->secure);
-
-	return 0;
-
-error:
-	__depr_arm_iommu_release_mapping(client->mmu_mapping);
-	return rc;
-}
-
 /**
  * msm_smmu_probe()
  * @pdev: platform device
@@ -531,7 +422,6 @@
 	const struct of_device_id *match;
 	struct msm_smmu_client *client;
 	const struct msm_smmu_domain *domain;
-	int rc;
 
 	match = of_match_device(msm_smmu_dt_match, &pdev->dev);
 	if (!match || !match->data) {
@@ -552,11 +442,27 @@
 		return -ENOMEM;
 
 	client->dev = &pdev->dev;
+	client->domain = iommu_get_domain_for_dev(client->dev);
+	if (!client->domain) {
+		dev_err(&pdev->dev, "iommu get domain for dev failed\n");
+		return -EINVAL;
+	}
 
-	rc = _msm_smmu_create_mapping(client, domain);
+	if (!client->dev->dma_parms)
+		client->dev->dma_parms = devm_kzalloc(client->dev,
+				sizeof(*client->dev->dma_parms), GFP_KERNEL);
+	dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32));
+	dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64));
+
+	iommu_set_fault_handler(client->domain,
+			msm_smmu_fault_handler, (void *)client);
+
+	DRM_INFO("Created domain %s, secure=%d\n",
+			domain->label, domain->secure);
+
 	platform_set_drvdata(pdev, client);
 
-	return rc;
+	return 0;
 }
 
 static int msm_smmu_remove(struct platform_device *pdev)
@@ -564,11 +470,7 @@
 	struct msm_smmu_client *client;
 
 	client = platform_get_drvdata(pdev);
-	if (client->domain_attached) {
-		__depr_arm_iommu_detach_device(client->dev);
-		client->domain_attached = false;
-	}
-	__depr_arm_iommu_release_mapping(client->mmu_mapping);
+	client->domain_attached = false;
 
 	return 0;
 }
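
The msm_smmu.c rework above changes the ownership model: rather than creating
and attaching an ARM IOMMU mapping itself, the driver now borrows the domain
the IOMMU core already attached at probe time via the device tree, which is
why msm_smmu_remove() no longer detaches or releases anything. The probe-time
pattern in isolation:

	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);

	if (!domain)
		return -EINVAL;	/* DT did not attach this device */

	iommu_set_fault_handler(domain, msm_smmu_fault_handler, client);
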
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 7436776..f1d5f0e 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -2329,6 +2329,16 @@
 		sde_kms->aspace[domain] : NULL;
 }
 
+static struct device *_sde_kms_get_address_space_device(struct msm_kms *kms,
+		unsigned int domain)
+{
+	struct msm_gem_address_space *aspace =
+		_sde_kms_get_address_space(kms, domain);
+
+	return (aspace && aspace->domain_attached) ?
+			msm_gem_get_aspace_device(aspace) : NULL;
+}
+
 static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
 {
 	struct drm_device *dev = NULL;
@@ -2821,6 +2831,7 @@
 	.cont_splash_config = sde_kms_cont_splash_config,
 	.register_events = _sde_kms_register_events,
 	.get_address_space = _sde_kms_get_address_space,
+	.get_address_space_device = _sde_kms_get_address_space_device,
 	.postopen = _sde_kms_post_open,
 	.check_for_splash = sde_kms_check_for_splash,
 };
@@ -2833,19 +2844,13 @@
 
 static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
 {
-	struct msm_mmu *mmu;
 	int i;
 
 	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
 		if (!sde_kms->aspace[i])
 			continue;
 
-		mmu = sde_kms->aspace[i]->mmu;
-
-		mmu->funcs->detach(mmu, (const char **)iommu_ports,
-				ARRAY_SIZE(iommu_ports));
 		msm_gem_address_space_put(sde_kms->aspace[i]);
-
 		sde_kms->aspace[i] = NULL;
 	}
 
@@ -2856,7 +2861,7 @@
 {
 	struct msm_mmu *mmu;
 	int i, ret;
-	int early_map = 1;
+	int early_map = 0;
 
 	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
 		struct msm_gem_address_space *aspace;
@@ -2869,41 +2874,15 @@
 			continue;
 		}
 
-		/*
-		 * Before attaching SMMU, we need to honor continuous splash
-		 * use case where hardware tries to fetch buffer from physical
-		 * address. To facilitate this requirement we need to have a
-		 * one to one mapping on SMMU until we have our first frame.
-		 */
-		if (i == MSM_SMMU_DOMAIN_UNSECURE) {
-			ret = mmu->funcs->set_attribute(mmu,
-				DOMAIN_ATTR_EARLY_MAP,
-				&early_map);
-			if (ret) {
-				SDE_ERROR("failed to set map att: %d\n", ret);
-				goto fail;
-			}
-		}
-
 		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
 			mmu, "sde");
 		if (IS_ERR(aspace)) {
 			ret = PTR_ERR(aspace);
-			mmu->funcs->destroy(mmu);
 			goto fail;
 		}
 
 		sde_kms->aspace[i] = aspace;
-
-		ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
-				ARRAY_SIZE(iommu_ports));
-		if (ret) {
-			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
-			msm_gem_address_space_put(aspace);
-			goto fail;
-		}
 		aspace->domain_attached = true;
-		early_map = 0;
 
 		/* Mapping splash memory block */
 		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
@@ -2916,18 +2895,20 @@
 		}
 
 		/*
-		 * Turning off early map after generating one to one
-		 * mapping for splash address space.
+		 * Disable early-map, which would have been enabled during
+		 * bootup by the SMMU through the device-tree cont-splash hint.
 		 */
 		ret = mmu->funcs->set_attribute(mmu, DOMAIN_ATTR_EARLY_MAP,
-			&early_map);
+				 &early_map);
 		if (ret) {
-			SDE_ERROR("failed to set map att ret:%d\n", ret);
+			SDE_ERROR("failed to set_att ret:%d, early_map:%d\n",
+					ret, early_map);
 			goto early_map_fail;
 		}
 	}
 
 	return 0;
+
 early_map_fail:
 	_sde_kms_unmap_all_splash_regions(sde_kms);
 fail:
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 483c064..4f8013c 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -1029,7 +1029,6 @@
 #define A6XX_GMU_AO_RGMU_GLM_HW_CRC_DISABLE	0x23B82
 
 /* GMU RSC control registers */
-#define A6XX_GPU_RSCC_RSC_STATUS0_DRV0		0x23404
 #define A6XX_GMU_RSCC_CONTROL_REQ		0x23B07
 #define A6XX_GMU_RSCC_CONTROL_ACK		0x23B08
 
@@ -1042,23 +1041,24 @@
 #define A6XX_GPU_CC_GX_DOMAIN_MISC		0x24542
 
 /* GPU RSC sequencer registers */
-#define	A6XX_RSCC_PDC_SEQ_START_ADDR			0x23408
-#define A6XX_RSCC_PDC_MATCH_VALUE_LO			0x23409
-#define A6XX_RSCC_PDC_MATCH_VALUE_HI			0x2340A
-#define A6XX_RSCC_PDC_SLAVE_ID_DRV0			0x2340B
-#define A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR			0x2340D
-#define A6XX_RSCC_HIDDEN_TCS_CMD0_DATA			0x2340E
-#define A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0	0x23482
-#define A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0	0x23483
-#define A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0		0x23489
-#define A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0		0x2348C
-#define A6XX_RSCC_OVERRIDE_START_ADDR			0x23500
-#define A6XX_RSCC_SEQ_BUSY_DRV0				0x23501
-#define A6XX_RSCC_SEQ_MEM_0_DRV0			0x23580
-#define A6XX_RSCC_TCS0_DRV0_STATUS			0x23746
-#define A6XX_RSCC_TCS1_DRV0_STATUS                      0x237EE
-#define A6XX_RSCC_TCS2_DRV0_STATUS                      0x23896
-#define A6XX_RSCC_TCS3_DRV0_STATUS                      0x2393E
+#define A6XX_GPU_RSCC_RSC_STATUS0_DRV0			0x00004
+#define A6XX_RSCC_PDC_SEQ_START_ADDR			0x00008
+#define A6XX_RSCC_PDC_MATCH_VALUE_LO			0x00009
+#define A6XX_RSCC_PDC_MATCH_VALUE_HI			0x0000A
+#define A6XX_RSCC_PDC_SLAVE_ID_DRV0			0x0000B
+#define A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR			0x0000D
+#define A6XX_RSCC_HIDDEN_TCS_CMD0_DATA			0x0000E
+#define A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0	0x00082
+#define A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0	0x00083
+#define A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0		0x00089
+#define A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0		0x0008C
+#define A6XX_RSCC_OVERRIDE_START_ADDR			0x00100
+#define A6XX_RSCC_SEQ_BUSY_DRV0				0x00101
+#define A6XX_RSCC_SEQ_MEM_0_DRV0			0x00180
+#define A6XX_RSCC_TCS0_DRV0_STATUS			0x00346
+#define A6XX_RSCC_TCS1_DRV0_STATUS			0x003EE
+#define A6XX_RSCC_TCS2_DRV0_STATUS			0x00496
+#define A6XX_RSCC_TCS3_DRV0_STATUS			0x0053E
 
 /* GPU PDC sequencer registers in AOSS.RPMh domain */
 #define PDC_GPU_ENABLE_PDC			0x1140
@@ -1095,6 +1095,13 @@
  */
 #define PDC_GPU_SEQ_MEM_0			0x0
 
+/*
+ * The legacy RSCC register range was part of the GMU register space;
+ * now a separate section is used for the RSCC registers. Add this
+ * offset for backward compatibility.
+ */
+#define RSCC_OFFSET_LEGACY			0x23400
+
 /* RGMU(PCC) registers in A6X_GMU_CX_0_NON_CONTEXT_DEC domain */
 #define A6XX_RGMU_CX_INTR_GEN_EN		0x1F80F
 #define A6XX_RGMU_CX_RGMU_TIMER0		0x1F834
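
A worked example of the rebasing above: the RSCC registers now start at dword
0 of their own region, and on legacy targets, where the RSCC still lives
inside the GMU register space, callers rebuild the old absolute offset by
adding RSCC_OFFSET_LEGACY:

	/* legacy dword offset of RSC_STATUS0: 0x00004 + 0x23400 = 0x23404 */
	u32 off = A6XX_GPU_RSCC_RSC_STATUS0_DRV0 + RSCC_OFFSET_LEGACY;
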
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index b8e00d9..182aff7 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1210,6 +1210,25 @@
 					res->start, adreno_dev->cx_misc_len);
 }
 
+static void adreno_rscc_probe(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct resource *res;
+
+	res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
+						"rscc");
+
+	if (res == NULL)
+		return;
+
+	adreno_dev->rscc_base = res->start - device->reg_phys;
+	adreno_dev->rscc_len = resource_size(res);
+	adreno_dev->rscc_virt = devm_ioremap(device->dev, res->start,
+						adreno_dev->rscc_len);
+	if (adreno_dev->rscc_virt == NULL)
+		dev_warn(device->dev, "rscc ioremap failed\n");
+}
+
 static void adreno_efuse_read_soc_hw_rev(struct adreno_device *adreno_dev)
 {
 	unsigned int val;
@@ -1335,6 +1354,7 @@
 	/* Probe for the optional CX_MISC block */
 	adreno_cx_misc_probe(device);
 
+	adreno_rscc_probe(device);
 	/*
 	 * qcom,iommu-secure-id is used to identify MMUs that can handle secure
 	 * content but that is only part of the story - the GPU also has to be
@@ -3205,6 +3225,24 @@
 	 */
 	rmb();
 }
+void adreno_rscc_regread(struct adreno_device *adreno_dev,
+	unsigned int offsetwords, unsigned int *value)
+{
+	unsigned int rscc_offset;
+
+	rscc_offset = (offsetwords << 2);
+	if (!adreno_dev->rscc_virt ||
+		(rscc_offset >= adreno_dev->rscc_len))
+		return;
+
+	*value = __raw_readl(adreno_dev->rscc_virt + rscc_offset);
+
+	/*
+	 * Ensure this read finishes before the next one,
+	 * i.e. act like a normal readl().
+	 */
+	rmb();
+}
 
 void adreno_cx_misc_regwrite(struct adreno_device *adreno_dev,
 	unsigned int offsetwords, unsigned int value)
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 8014161..af2fbb7 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -124,6 +124,8 @@
 #define ADRENO_ACD BIT(17)
 /* ECP enabled GMU */
 #define ADRENO_ECP BIT(18)
+/* Cooperative reset enabled GMU */
+#define ADRENO_COOP_RESET BIT(19)
 
 /*
  * Adreno GPU quirks - control bits for various workarounds
@@ -439,6 +441,9 @@
  * @gmem_size: GMEM size
  * @cx_misc_len: Length of the CX MISC register block
  * @cx_misc_virt: Pointer where the CX MISC block is mapped
+ * @rscc_base: Base physical address of the RSCC
+ * @rscc_len: Length of the RSCC register block
+ * @rscc_virt: Pointer where RSCC block is mapped
  * @gpucore: Pointer to the adreno_gpu_core structure
  * @pfp_fw: Buffer which holds the pfp ucode
  * @pfp_fw_size: Size of pfp ucode buffer
@@ -521,6 +526,9 @@
 	void __iomem *cx_dbgc_virt;
 	unsigned int cx_misc_len;
 	void __iomem *cx_misc_virt;
+	unsigned long rscc_base;
+	unsigned int rscc_len;
+	void __iomem *rscc_virt;
 	const struct adreno_gpu_core *gpucore;
 	struct adreno_firmware fw[2];
 	size_t gpmu_cmds_size;
@@ -1180,6 +1188,8 @@
 void adreno_cx_misc_regrmw(struct adreno_device *adreno_dev,
 		unsigned int offsetwords,
 		unsigned int mask, unsigned int bits);
+void adreno_rscc_regread(struct adreno_device *adreno_dev,
+		unsigned int offsetwords, unsigned int *value);
 
 
 #define ADRENO_TARGET(_name, _id) \
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index 2e9ebd6..cc2b7aa 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -135,6 +135,39 @@
 	return -ETIMEDOUT;
 }
 
+static inline int timed_poll_check_rscc(struct kgsl_device *device,
+		unsigned int offset, unsigned int expected_ret,
+		unsigned int timeout, unsigned int mask)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned long t;
+	unsigned int value;
+
+	t = jiffies + msecs_to_jiffies(timeout);
+
+	do {
+		if (adreno_is_a650(adreno_dev))
+			adreno_rscc_regread(adreno_dev, offset, &value);
+		else
+			gmu_core_regread(device, offset + RSCC_OFFSET_LEGACY,
+						&value);
+		if ((value & mask) == expected_ret)
+			return 0;
+		/* Wait 10-100us to reduce unnecessary AHB bus traffic */
+		usleep_range(10, 100);
+	} while (!time_after(jiffies, t));
+
+	/* Double check one last time */
+	if (adreno_is_a650(adreno_dev))
+		adreno_rscc_regread(adreno_dev, offset, &value);
+	else
+		gmu_core_regread(device, offset + RSCC_OFFSET_LEGACY, &value);
+	if ((value & mask) == expected_ret)
+		return 0;
+
+	return -ETIMEDOUT;
+}
+
 /*
  * read_AO_counter() - Returns the 64bit always on counter value
  *
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index efff9ef..5f044fc 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -81,8 +81,8 @@
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct resource *res_pdc, *res_cfg, *res_seq;
-	void __iomem *cfg = NULL, *seq = NULL;
-	unsigned int cfg_offset, seq_offset;
+	void __iomem *cfg = NULL, *seq = NULL, *rscc;
+	unsigned int cfg_offset, seq_offset, rscc_offset;
 
 	/* Offsets from the base PDC (if no PDC subsections in the DTSI) */
 	if (adreno_is_a640v2(adreno_dev)) {
@@ -93,6 +93,10 @@
 		seq_offset = 0x280000;
 	}
 
+	if (adreno_is_a650(adreno_dev))
+		rscc = adreno_dev->rscc_virt;
+	else
+		rscc = device->gmu_core.reg_virt + 0x23000;
 	/*
 	 * Older A6x platforms specified PDC registers in the DT using a
 	 * single base pointer that encompassed the entire PDC range. Current
@@ -111,7 +115,6 @@
 			"kgsl_gmu_pdc_cfg");
 	res_seq = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM,
 			"kgsl_gmu_pdc_seq");
-
 	/*
 	 * Map the starting address for pdc_cfg programming. If the pdc_cfg
 	 * resource is not available use an offset from the base PDC resource.
@@ -142,37 +145,33 @@
 	}
 
 	/* Disable SDE clock gating */
-	gmu_core_regwrite(device, A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+	_regwrite(rscc, A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
 
 	/* Setup RSC PDC handshake for sleep and wakeup */
-	gmu_core_regwrite(device, A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
-	gmu_core_regwrite(device, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
-	gmu_core_regwrite(device, A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
-	gmu_core_regwrite(device,
-			A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET, 0);
-	gmu_core_regwrite(device,
-			A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET, 0);
-	gmu_core_regwrite(device,
-			A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET * 2,
+	_regwrite(rscc, A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
+	_regwrite(rscc, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
+	_regwrite(rscc, A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
+	_regwrite(rscc, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET, 0);
+	_regwrite(rscc, A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET, 0);
+	_regwrite(rscc, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET * 2,
 			0x80000000);
-	gmu_core_regwrite(device,
-			A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET * 2,
+	_regwrite(rscc, A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET * 2,
 			0);
-	gmu_core_regwrite(device, A6XX_RSCC_OVERRIDE_START_ADDR, 0);
-	gmu_core_regwrite(device, A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
-	gmu_core_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
-	gmu_core_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+	_regwrite(rscc, A6XX_RSCC_OVERRIDE_START_ADDR, 0);
+	_regwrite(rscc, A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
+	_regwrite(rscc, A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
+	_regwrite(rscc, A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
 
 	/* Enable timestamp event for v1 only */
 	if (adreno_is_a630v1(adreno_dev))
-		gmu_core_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
+		_regwrite(rscc, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
 
 	/* Load RSC sequencer uCode for sleep and wakeup */
-	gmu_core_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0, 0xA7A506A0);
-	gmu_core_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xA1E6A6E7);
-	gmu_core_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xA2E081E1);
-	gmu_core_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xE9A982E2);
-	gmu_core_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020E8A8);
+	_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0, 0xA7A506A0);
+	_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xA1E6A6E7);
+	_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xA2E081E1);
+	_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xE9A982E2);
+	_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020E8A8);
 
 	/* Load PDC sequencer uCode for power up and power down sequence */
 	_regwrite(seq, PDC_GPU_SEQ_MEM_0, 0xFEBEA1E1);
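
The writes above move from gmu_core_regwrite(), which takes absolute GMU
dword offsets, to _regwrite() against the ioremapped rscc base selected
earlier in the function. The helper is assumed to have the usual kgsl shape,
already used in this file for the PDC cfg/seq blocks:

static void _regwrite(void __iomem *regbase,
		unsigned int offsetwords, unsigned int value)
{
	/* offsets are in dwords, hence the << 2 */
	__raw_writel(value, regbase + (offsetwords << 2));
}
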
@@ -314,6 +313,14 @@
 static int a6xx_gmu_start(struct kgsl_device *device)
 {
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+	u32 val = 0x00000100;
+	u32 mask = 0x000001FF;
+
+	/* Check for 0xBABEFACE on legacy targets */
+	if (!ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_COOP_RESET)) {
+		val = 0xBABEFACE;
+		mask = 0xFFFFFFFF;
+	}
 
 	kgsl_regwrite(device, A6XX_GMU_CX_GMU_WFI_CONFIG, 0x0);
 
@@ -321,9 +328,7 @@
 	gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 0);
 	if (timed_poll_check(device,
 			A6XX_GMU_CM3_FW_INIT_RESULT,
-			0xBABEFACE,
-			GMU_START_TIMEOUT,
-			0xFFFFFFFF)) {
+			val, GMU_START_TIMEOUT, mask)) {
 		dev_err(&gmu->pdev->dev, "GMU doesn't boot\n");
 		return -ETIMEDOUT;
 	}
@@ -383,7 +388,7 @@
 		return -EINVAL;
 	}
 
-	if (timed_poll_check(device,
+	if (timed_poll_check_rscc(device,
 			A6XX_RSCC_SEQ_BUSY_DRV0,
 			0,
 			GPU_START_TIMEOUT,
@@ -419,20 +424,21 @@
 
 	/* RSC sleep sequence is different on v1 */
 	if (adreno_is_a630v1(adreno_dev))
-		gmu_core_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
+		gmu_core_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 +
+						RSCC_OFFSET_LEGACY, 1);
 
 	gmu_core_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 1);
 	/* Make sure the request completes before continuing */
 	wmb();
 
 	if (adreno_is_a630v1(adreno_dev))
-		ret = timed_poll_check(device,
+		ret = timed_poll_check_rscc(device,
 				A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0,
 				BIT(0),
 				GPU_START_TIMEOUT,
 				BIT(0));
 	else
-		ret = timed_poll_check(device,
+		ret = timed_poll_check_rscc(device,
 				A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
 				BIT(16),
 				GPU_START_TIMEOUT,
@@ -446,11 +452,11 @@
 	/* Read to clear the timestamp valid signal. Don't care what we read. */
 	if (adreno_is_a630v1(adreno_dev)) {
 		gmu_core_regread(device,
-				A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0,
-				&ret);
+				A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 +
+					RSCC_OFFSET_LEGACY, &ret);
 		gmu_core_regread(device,
-				A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0,
-				&ret);
+				A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 +
+					RSCC_OFFSET_LEGACY, &ret);
 	}
 
 	gmu_core_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
@@ -694,14 +700,14 @@
 	if (!gmu_core_isenabled(device))
 		return ret;
 
-	ret |= timed_poll_check(device, A6XX_RSCC_TCS0_DRV0_STATUS, BIT(0),
-			GPU_RESET_TIMEOUT, BIT(0));
-	ret |= timed_poll_check(device, A6XX_RSCC_TCS1_DRV0_STATUS, BIT(0),
-			GPU_RESET_TIMEOUT, BIT(0));
-	ret |= timed_poll_check(device, A6XX_RSCC_TCS2_DRV0_STATUS, BIT(0),
-			GPU_RESET_TIMEOUT, BIT(0));
-	ret |= timed_poll_check(device, A6XX_RSCC_TCS3_DRV0_STATUS, BIT(0),
-			GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check_rscc(device, A6XX_RSCC_TCS0_DRV0_STATUS,
+			BIT(0), GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check_rscc(device, A6XX_RSCC_TCS1_DRV0_STATUS,
+			BIT(0), GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check_rscc(device, A6XX_RSCC_TCS2_DRV0_STATUS,
+			BIT(0), GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check_rscc(device, A6XX_RSCC_TCS3_DRV0_STATUS,
+			BIT(0), GPU_RESET_TIMEOUT, BIT(0));
 
 	return ret;
 }
@@ -953,7 +959,6 @@
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
-	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
 	uint32_t gmu_log_info;
 	int ret;
 	unsigned int chipid = 0;
@@ -996,7 +1001,7 @@
 	gmu_core_regwrite(device, A6XX_GMU_CM3_BOOT_CONFIG, gmu->load_mode);
 
 	gmu_core_regwrite(device, A6XX_GMU_HFI_QTBL_ADDR,
-			mem_addr->gmuaddr);
+			gmu->hfi_mem->gmuaddr);
 	gmu_core_regwrite(device, A6XX_GMU_HFI_QTBL_INFO, 1);
 
 	gmu_core_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0,
@@ -1506,6 +1511,7 @@
 		return 0;
 	}
 
+	memset(mem_hdr, 0, sizeof(*mem_hdr));
 	mem_hdr->type = desc->type;
 	mem_hdr->hostaddr = (uintptr_t)desc->memdesc->hostptr;
 	mem_hdr->gmuaddr = desc->memdesc->gmuaddr;
@@ -1517,6 +1523,94 @@
 	return desc->memdesc->size + sizeof(*mem_hdr);
 }
 
+struct a6xx_tcm_data {
+	enum gmu_mem_type type;
+	u32 start;
+	u32 last;
+};
+
+static size_t a6xx_snapshot_gmu_tcm(struct kgsl_device *device,
+		u8 *buf, size_t remain, void *priv)
+{
+	struct kgsl_snapshot_gmu_mem *mem_hdr =
+		(struct kgsl_snapshot_gmu_mem *)buf;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr));
+	unsigned int i, bytes;
+	struct a6xx_tcm_data *tcm = priv;
+
+	bytes = (tcm->last - tcm->start + 1) << 2;
+
+	if (remain < bytes + sizeof(*mem_hdr)) {
+		SNAPSHOT_ERR_NOMEM(device, "GMU Memory");
+		return 0;
+	}
+
+	mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
+	mem_hdr->hostaddr = 0;
+	mem_hdr->gmuaddr = gmu_get_memtype_base(KGSL_GMU_DEVICE(device),
+			tcm->type);
+	mem_hdr->gpuaddr = 0;
+
+	for (i = tcm->start; i <= tcm->last; i++)
+		kgsl_regread(device, i, data++);
+
+	return bytes + sizeof(*mem_hdr);
+}
+
+static void a6xx_gmu_snapshot_memories(struct kgsl_device *device,
+		struct kgsl_snapshot *snapshot)
+{
+	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+	struct gmu_mem_type_desc desc;
+	struct gmu_memdesc *md;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(gmu->kmem_entries); i++) {
+		if (!test_bit(i, &gmu->kmem_bitmap))
+			continue;
+
+		md = &gmu->kmem_entries[i];
+		if (!md->size)
+			continue;
+
+		desc.memdesc = md;
+		if (md == gmu->hfi_mem)
+			desc.type = SNAPSHOT_GMU_MEM_HFI;
+		else if (md == gmu->gmu_log)
+			desc.type = SNAPSHOT_GMU_MEM_LOG;
+		else if (md == gmu->dump_mem)
+			desc.type = SNAPSHOT_GMU_MEM_DEBUG;
+		else
+			desc.type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
+
+		if (md->mem_type == GMU_ITCM) {
+			struct a6xx_tcm_data tcm = {
+				.type = md->mem_type,
+				.start = a6xx_gmu_tcm_registers[0],
+				.last = a6xx_gmu_tcm_registers[1],
+			};
+
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
+				snapshot, a6xx_snapshot_gmu_tcm, &tcm);
+		} else if (md->mem_type == GMU_DTCM) {
+			struct a6xx_tcm_data tcm = {
+				.type = md->mem_type,
+				.start = a6xx_gmu_tcm_registers[2],
+				.last = a6xx_gmu_tcm_registers[3],
+			};
+
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
+				snapshot, a6xx_snapshot_gmu_tcm, &tcm);
+		} else {
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
+				snapshot, a6xx_snapshot_gmu_mem, &desc);
+		}
+	}
+}
+
 struct kgsl_snapshot_gmu_version {
 	uint32_t type;
 	uint32_t value;
@@ -1566,42 +1660,9 @@
 				&gmu_vers[i]);
 }
 
-struct a6xx_tcm_data {
-	enum gmu_mem_type type;
-	u32 start;
-	u32 last;
-};
-
-static size_t a6xx_snapshot_gmu_tcm(struct kgsl_device *device,
-		u8 *buf, size_t remain, void *priv)
-{
-	struct kgsl_snapshot_gmu_mem *mem_hdr =
-		(struct kgsl_snapshot_gmu_mem *)buf;
-	unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr));
-	unsigned int i, bytes;
-	struct a6xx_tcm_data *tcm = priv;
-
-	bytes = (tcm->last - tcm->start + 1) << 2;
-
-	if (remain < bytes + sizeof(*mem_hdr)) {
-		SNAPSHOT_ERR_NOMEM(device, "GMU Memory");
-		return 0;
-	}
-
-	mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK;
-	mem_hdr->hostaddr = 0;
-	mem_hdr->gmuaddr = gmu_get_memtype_base(tcm->type);
-	mem_hdr->gpuaddr = 0;
-
-	for (i = tcm->start; i <= tcm->last; i++)
-		kgsl_regread(device, i, data++);
-
-	return bytes + sizeof(*mem_hdr);
-}
-
 /*
  * a6xx_gmu_snapshot() - A6XX GMU snapshot function
- * @adreno_dev: Device being snapshotted
+ * @device: Device being snapshotted
  * @snapshot: Pointer to the snapshot instance
  *
  * This is where all of the A6XX GMU specific bits and pieces are grabbed
@@ -1610,51 +1671,21 @@
 static void a6xx_gmu_snapshot(struct kgsl_device *device,
 		struct kgsl_snapshot *snapshot)
 {
-	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
-	struct gmu_mem_type_desc desc[] = {
-		{gmu->hfi_mem, SNAPSHOT_GMU_MEM_HFI},
-		{gmu->gmu_log, SNAPSHOT_GMU_MEM_LOG},
-		{gmu->dump_mem, SNAPSHOT_GMU_MEM_DEBUG},
-	};
-	unsigned int val, i;
+	unsigned int val;
 
 	if (!gmu_core_isenabled(device))
 		return;
 
 	a6xx_gmu_snapshot_versions(device, snapshot);
 
-	for (i = 0; i < ARRAY_SIZE(desc); i++) {
-		if (desc[i].memdesc)
-			kgsl_snapshot_add_section(device,
-					KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
-					snapshot, a6xx_snapshot_gmu_mem,
-					&desc[i]);
-	}
+	a6xx_gmu_snapshot_memories(device, snapshot);
 
-	if (adreno_is_a640(adreno_dev) || adreno_is_a650(adreno_dev) ||
-			adreno_is_a680(adreno_dev)) {
-		struct a6xx_tcm_data tcm = {
-			.type = GMU_ITCM,
-			.start = a6xx_gmu_tcm_registers[0],
-			.last = a6xx_gmu_tcm_registers[1],
-		};
-
-		kgsl_snapshot_add_section(device,
-				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
-				snapshot, a6xx_snapshot_gmu_tcm, &tcm);
-
-		tcm.type = GMU_DTCM;
-		tcm.start = a6xx_gmu_tcm_registers[2],
-		tcm.last = a6xx_gmu_tcm_registers[3],
-
-		kgsl_snapshot_add_section(device,
-				KGSL_SNAPSHOT_SECTION_GMU_MEMORY,
-				snapshot, a6xx_snapshot_gmu_tcm, &tcm);
-	} else {
+	/* Snapshot TCMs as registers for legacy targets */
+	if (adreno_is_a630(ADRENO_DEVICE(device)) ||
+			adreno_is_a615_family(ADRENO_DEVICE(device)))
 		adreno_snapshot_registers(device, snapshot,
 				a6xx_gmu_tcm_registers,
 				ARRAY_SIZE(a6xx_gmu_tcm_registers) / 2);
-	}
 
 	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
 					ARRAY_SIZE(a6xx_gmu_registers) / 2);
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 4c1297f..fdf1680 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -38,16 +38,30 @@
 #define DUMMY_SIZE   SZ_4K
 
 /* Define target specific GMU VMA configurations */
-static const struct gmu_vma_entry {
+
+struct gmu_vma_entry {
 	unsigned int start;
 	unsigned int size;
-} gmu_vma[] = {
+};
+
+static const struct gmu_vma_entry gmu_vma_legacy[] = {
 	[GMU_ITCM] = { .start = 0x00000, .size = SZ_16K },
 	[GMU_ICACHE] = { .start = 0x04000, .size = (SZ_256K - SZ_16K) },
 	[GMU_DTCM] = { .start = 0x40000, .size = SZ_16K },
 	[GMU_DCACHE] = { .start = 0x44000, .size = (SZ_256K - SZ_16K) },
 	[GMU_NONCACHED_KERNEL] = { .start = 0x60000000, .size = SZ_512M },
 	[GMU_NONCACHED_USER] = { .start = 0x80000000, .size = SZ_1G },
+	[GMU_MEM_TYPE_MAX] = { .start = 0x0, .size = 0x0 },
+};
+
+static const struct gmu_vma_entry gmu_vma[] = {
+	[GMU_ITCM] = { .start = 0x00000000, .size = SZ_16K },
+	[GMU_CACHE] = { .start = SZ_16K, .size = (SZ_16M - SZ_16K) },
+	[GMU_DTCM] = { .start = SZ_256M + SZ_16K, .size = SZ_16K },
+	[GMU_DCACHE] = { .start = 0x0, .size = 0x0 },
+	[GMU_NONCACHED_KERNEL] = { .start = 0x60000000, .size = SZ_512M },
+	[GMU_NONCACHED_USER] = { .start = 0x80000000, .size = SZ_1G },
+	[GMU_MEM_TYPE_MAX] = { .start = 0x0, .size = 0x0 },
 };
 
 struct gmu_iommu_context gmu_ctx[] = {
@@ -70,9 +84,10 @@
 static void gmu_snapshot(struct kgsl_device *device);
 static void gmu_remove(struct kgsl_device *device);
 
-unsigned int gmu_get_memtype_base(enum gmu_mem_type type)
+unsigned int gmu_get_memtype_base(struct gmu_device *gmu,
+		enum gmu_mem_type type)
 {
-	return gmu_vma[type].start;
+	return gmu->vma[type].start;
 }
 
 static int _gmu_iommu_fault_handler(struct device *dev,
@@ -218,7 +233,7 @@
 	case GMU_NONCACHED_KERNEL:
 		/* Set start address for first uncached kernel alloc */
 		if (next_uncached_kernel_alloc == 0)
-			next_uncached_kernel_alloc = gmu_vma[mem_type].start;
+			next_uncached_kernel_alloc = gmu->vma[mem_type].start;
 
 		if (addr == 0)
 			addr = next_uncached_kernel_alloc;
@@ -230,7 +245,7 @@
 	case GMU_NONCACHED_USER:
 		/* Set start address for first uncached user alloc */
 		if (next_uncached_kernel_alloc == 0)
-			next_uncached_user_alloc = gmu_vma[mem_type].start;
+			next_uncached_user_alloc = gmu->vma[mem_type].start;
 
 		if (addr == 0)
 			addr = next_uncached_user_alloc;
@@ -395,14 +410,15 @@
 
 }
 
-static enum gmu_mem_type gmu_get_blk_memtype(struct gmu_block_header *blk)
+static enum gmu_mem_type gmu_get_blk_memtype(struct gmu_device *gmu,
+		struct gmu_block_header *blk)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(gmu_vma); i++) {
-		if (blk->addr >= gmu_vma[i].start &&
+	for (i = 0; i < GMU_MEM_TYPE_MAX; i++) {
+		if (blk->addr >= gmu->vma[i].start &&
 				blk->addr + blk->value <=
-				gmu_vma[i].start + gmu_vma[i].size)
+				gmu->vma[i].start + gmu->vma[i].size)
 			return (enum gmu_mem_type)i;
 	}
 
@@ -420,7 +436,7 @@
 	if (md)
 		return 0;
 
-	type = gmu_get_blk_memtype(blk);
+	type = gmu_get_blk_memtype(gmu, blk);
 	if (type >= GMU_MEM_TYPE_MAX)
 		return -EINVAL;
 
@@ -1273,14 +1289,14 @@
 	struct gmu_memdesc *md;
 
 	/* Reserve a memdesc for ITCM. No actual memory is allocated */
-	md = allocate_gmu_kmem(gmu, GMU_ITCM, gmu_vma[GMU_ITCM].start,
-			gmu_vma[GMU_ITCM].size, 0);
+	md = allocate_gmu_kmem(gmu, GMU_ITCM, gmu->vma[GMU_ITCM].start,
+			gmu->vma[GMU_ITCM].size, 0);
 	if (IS_ERR(md))
 		return PTR_ERR(md);
 
 	/* Reserve a memdesc for DTCM. No actual memory is allocated */
-	md = allocate_gmu_kmem(gmu, GMU_DTCM, gmu_vma[GMU_DTCM].start,
-			gmu_vma[GMU_DTCM].size, 0);
+	md = allocate_gmu_kmem(gmu, GMU_DTCM, gmu->vma[GMU_DTCM].start,
+			gmu->vma[GMU_DTCM].size, 0);
 
 	return PTR_ERR_OR_ZERO(md);
 }
@@ -1295,16 +1311,19 @@
 		return 0;
 
 	md = allocate_gmu_kmem(gmu, GMU_ICACHE,
-			gmu_vma[GMU_ICACHE].start, gmu_vma[GMU_ICACHE].size,
+			gmu->vma[GMU_ICACHE].start, gmu->vma[GMU_ICACHE].size,
 			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
 	if (IS_ERR(md))
 		return PTR_ERR(md);
 
-	md = allocate_gmu_kmem(gmu, GMU_DCACHE,
-			gmu_vma[GMU_DCACHE].start, gmu_vma[GMU_DCACHE].size,
-			(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
-	if (IS_ERR(md))
-		return PTR_ERR(md);
+	if (!adreno_is_a650(ADRENO_DEVICE(device))) {
+		md = allocate_gmu_kmem(gmu, GMU_DCACHE,
+				gmu->vma[GMU_DCACHE].start,
+				gmu->vma[GMU_DCACHE].size,
+				(IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
+		if (IS_ERR(md))
+			return PTR_ERR(md);
+	}
 
 	md = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL,
 			0, DUMMY_SIZE,
@@ -1362,6 +1381,11 @@
 	if (ret)
 		goto error;
 
+	if (adreno_is_a650(adreno_dev))
+		gmu->vma = gmu_vma;
+	else
+		gmu->vma = gmu_vma_legacy;
+
 	ret = gmu_tcm_init(gmu);
 	if (ret)
 		goto error;
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index 6e746b7..e5845b7 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -85,6 +85,7 @@
 enum gmu_mem_type {
 	GMU_ITCM = 0,
 	GMU_ICACHE,
+	GMU_CACHE = GMU_ICACHE,
 	GMU_DTCM,
 	GMU_DCACHE,
 	GMU_NONCACHED_KERNEL,
@@ -212,11 +213,13 @@
 	bool preallocations;
 	struct gmu_memdesc kmem_entries[GMU_KERNEL_ENTRIES];
 	unsigned long kmem_bitmap;
+	const struct gmu_vma_entry *vma;
 };
 
 struct gmu_memdesc *gmu_get_memdesc(struct gmu_device *gmu,
 		unsigned int addr, unsigned int size);
-unsigned int gmu_get_memtype_base(enum gmu_mem_type type);
+unsigned int gmu_get_memtype_base(struct gmu_device *gmu,
+		enum gmu_mem_type type);
 
 int gmu_prealloc_req(struct kgsl_device *device, struct gmu_block_header *blk);
 int gmu_memory_probe(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 6973126..2d388bf 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -50,11 +50,8 @@
 	if (hdr->status == HFI_QUEUE_STATUS_DISABLED)
 		return -EINVAL;
 
-	if (hdr->read_index == hdr->write_index) {
-		hdr->rx_req = 1;
-		result = -ENODATA;
-		goto done;
-	}
+	if (hdr->read_index == hdr->write_index)
+		return -ENODATA;
 
 	/* Clear the output data before populating */
 	memset(output, 0, max_size);
@@ -133,7 +130,6 @@
 			"Insufficient bufsize %d for msg id=%d of size %d\n",
 			empty_space, id, size);
 
-		hdr->drop_cnt++;
 		mutex_unlock(&hfi->cmdq_mutex);
 		return -ENOSPC;
 	}
@@ -213,21 +209,15 @@
 	tbl->qtbl_hdr.num_q = HFI_QUEUE_MAX;
 	tbl->qtbl_hdr.num_active_q = HFI_QUEUE_MAX;
 
-	/* Fill I dividual Queue Headers */
+	memset(&tbl->qhdr[0], 0, sizeof(tbl->qhdr));
+
+	/* Fill Individual Queue Headers */
 	for (i = 0; i < HFI_QUEUE_MAX; i++) {
 		hdr = &tbl->qhdr[i];
 		hdr->start_addr = GMU_QUEUE_START_ADDR(mem_addr, i);
 		hdr->type = QUEUE_HDR_TYPE(queue[i].idx, queue[i].pri, 0,  0);
 		hdr->status = queue[i].status;
 		hdr->queue_size = HFI_QUEUE_SIZE >> 2; /* convert to dwords */
-		hdr->msg_size = 0;
-		hdr->drop_cnt = 0;
-		hdr->rx_wm = 0x1;
-		hdr->tx_wm = 0x1;
-		hdr->rx_req = 0x1;
-		hdr->tx_req = 0x0;
-		hdr->read_index = 0x0;
-		hdr->write_index = 0x0;
 	}
 
 	mutex_init(&hfi->cmdq_mutex);
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index e1de8a4..cdc874a 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -127,11 +127,6 @@
  * @queue_size: size of the queue
  * @msg_size: size of the message if each message has fixed size.
  *	Otherwise, 0 means variable size of message in the queue.
- * @drop_cnt: count of dropped messages
- * @rx_wm: receiver watermark
- * @tx_wm: sender watermark
- * @rx_req: receiver request
- * @tx_req: sender request
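+ * @unused0: reserved, kept to preserve the queue header layout
+ * @unused1: reserved
+ * @unused2: reserved
+ * @unused3: reserved
+ * @unused4: reserved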
  * @read_index: read index of the queue
  * @write_index: write index of the queue
  */
@@ -141,11 +136,11 @@
 	uint32_t type;
 	uint32_t queue_size;
 	uint32_t msg_size;
-	uint32_t drop_cnt;
-	uint32_t rx_wm;
-	uint32_t tx_wm;
-	uint32_t rx_req;
-	uint32_t tx_req;
+	uint32_t unused0;
+	uint32_t unused1;
+	uint32_t unused2;
+	uint32_t unused3;
+	uint32_t unused4;
 	uint32_t read_index;
 	uint32_t write_index;
 };
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index a0ef1f1..fcbc574 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -1,13 +1,10 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_IOMMU_H
 #define __KGSL_IOMMU_H
 
-#ifdef CONFIG_QCOM_IOMMU
-#include <linux/qcom_iommu.h>
-#endif
 #include <linux/of.h>
 #include "kgsl.h"
 
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 1cca950..fa7723e 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
  */
 #ifndef __KGSL_MMU_H
 #define __KGSL_MMU_H
@@ -394,31 +394,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_QCOM_IOMMU
-#include <linux/qcom_iommu.h>
-#ifndef CONFIG_ARM_SMMU
-static inline bool kgsl_mmu_bus_secured(struct device *dev)
-{
-	struct bus_type *bus = msm_iommu_get_bus(dev);
-
-	return (bus == &msm_iommu_sec_bus_type) ? true : false;
-}
-#else
-static inline bool kgsl_mmu_bus_secured(struct device *dev)
-{
-	/* ARM driver contains all context banks on single bus */
-	return true;
-}
-#endif /* CONFIG_ARM_SMMU */
-static inline struct bus_type *kgsl_mmu_get_bus(struct device *dev)
-{
-	return msm_iommu_get_bus(dev);
-}
-static inline struct device *kgsl_mmu_get_ctx(const char *name)
-{
-	return msm_iommu_get_ctx(name);
-}
-#else
 static inline bool kgsl_mmu_bus_secured(struct device *dev)
 {
 	/*ARM driver contains all context banks on single bus */
@@ -433,6 +408,5 @@
 {
 	return ERR_PTR(-ENODEV);
 }
-#endif
 
 #endif /* __KGSL_MMU_H */
diff --git a/drivers/media/platform/msm/Kconfig b/drivers/media/platform/msm/Kconfig
index c915425..6c78677 100644
--- a/drivers/media/platform/msm/Kconfig
+++ b/drivers/media/platform/msm/Kconfig
@@ -13,3 +13,4 @@
 source "drivers/media/platform/msm/sde/Kconfig"
 source "drivers/media/platform/msm/vidc/Kconfig"
 source "drivers/media/platform/msm/npu/Kconfig"
+source "drivers/media/platform/msm/synx/Kconfig"
diff --git a/drivers/media/platform/msm/Makefile b/drivers/media/platform/msm/Makefile
index 6d72d38..0a2de13 100644
--- a/drivers/media/platform/msm/Makefile
+++ b/drivers/media/platform/msm/Makefile
@@ -9,3 +9,4 @@
 obj-$(CONFIG_MSM_CVP_V4L2) += cvp/
 obj-$(CONFIG_MSM_VIDC_V4L2) += vidc/
 obj-$(CONFIG_MSM_NPU) += npu/
+obj-$(CONFIG_MSM_GLOBAL_SYNX) += synx/
diff --git a/drivers/media/platform/msm/camera/cam_core/Makefile b/drivers/media/platform/msm/camera/cam_core/Makefile
index 137bc71..e44d25c 100644
--- a/drivers/media/platform/msm/camera/cam_core/Makefile
+++ b/drivers/media/platform/msm/camera/cam_core/Makefile
@@ -4,5 +4,6 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -I$(src)
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_context.o cam_context_utils.o cam_node.o cam_subdev.o
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
index 2509a16..5a7748d 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -669,7 +669,7 @@
 				if (cam_mem_put_cpu_buf(
 					io_cfg[i].mem_handle[j]))
 					CAM_WARN(CAM_FD,
-						"Invalid cpu buf %d %d %d %d",
+						"Invalid cpu buf %d %d %d",
 						io_cfg[i].direction,
 						io_cfg[i].resource_type, j);
 			}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 882e2b0..6e20aed 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -3354,8 +3354,9 @@
 			if (rc) {
 				CAM_ERR(CAM_ICP, "get cmd buf failed %x",
 					hw_mgr->iommu_hdl);
-				num_cmd_buf = (num_cmd_buf > 0) ?
-					num_cmd_buf-- : 0;
+
+				if (num_cmd_buf > 0)
+					num_cmd_buf--;
 				goto rel_cmd_buf;
 			}
 			*fw_cmd_buf_iova_addr = addr;
@@ -3367,8 +3368,9 @@
 				CAM_ERR(CAM_ICP, "get cmd buf failed %x",
 					hw_mgr->iommu_hdl);
 				*fw_cmd_buf_iova_addr = 0;
-				num_cmd_buf = (num_cmd_buf > 0) ?
-					num_cmd_buf-- : 0;
+
+				if (num_cmd_buf > 0)
+					num_cmd_buf--;
 				goto rel_cmd_buf;
 			}
 			cpu_addr = cpu_addr + cmd_desc[i].offset;
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index f514850..347b797 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -2708,7 +2708,7 @@
 		goto put_ref;
 
 	if (cam_mem_put_cpu_buf((int32_t) cmd->packet_handle))
-		CAM_WARN(CAM_ISP, "Can not put packet address : 0x%x",
+		CAM_WARN(CAM_ISP, "Can not put packet address : 0x%llx",
 			cmd->packet_handle);
 
 	CAM_DBG(CAM_REQ,
@@ -2725,7 +2725,7 @@
 	}
 free_cpu_buf:
 	if (cam_mem_put_cpu_buf((int32_t) cmd->packet_handle))
-		CAM_WARN(CAM_ISP, "Can not put packet address: 0x%x",
+		CAM_WARN(CAM_ISP, "Can not put packet address: 0x%llx",
 			cmd->packet_handle);
 free_req:
 	spin_lock_bh(&ctx->lock);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
index f29a670..7fc49be 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
@@ -11,6 +11,7 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -I$(src)
 
 obj-$(CONFIG_SPECTRA_CAMERA) += hw_utils/ isp_hw/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_hw_mgr.o cam_ife_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 54e0200..6163409 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -4529,23 +4529,25 @@
 
 		hw_res_left = isp_ife_out_res->hw_res[0];
 		if (hw_res_left && (evt_payload->core_index ==
-			hw_res_left->hw_intf->hw_idx))
+			hw_res_left->hw_intf->hw_idx)) {
 			rup_status = hw_res_left->bottom_half_handler(
 				hw_res_left, evt_payload);
+
+			if (rup_status == 0)
+				break;
+		}
 	}
 
-	CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
-
 	if (!rup_status) {
-		CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
-
-	if (!atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
-		ife_hwr_irq_rup_cb(
-			ife_hwr_mgr_ctx->common.cb_priv,
+		if (!atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+			ife_hwr_irq_rup_cb(
+				ife_hwr_mgr_ctx->common.cb_priv,
 				CAM_ISP_HW_EVENT_REG_UPDATE,
 				&rup_event_data);
 	}
 
+	CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
+
 	return 0;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index c7f5600..1a5e363 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -1841,7 +1841,7 @@
 	}
 
 	if (!pxl_reg) {
-		CAM_ERR(CAM_ISP, "CSID:%d &s %d is not supported on HW",
+		CAM_ERR(CAM_ISP, "CSID:%d %s %d is not supported on HW",
 			csid_hw->hw_intf->hw_idx, (is_ipp) ? "IPP" : "PPP",
 			res->res_id);
 		return -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
index 1d3a4f3..2429cda 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -191,9 +191,9 @@
 
 	if (!rc && soc_private->cpas_version == CAM_CPAS_TITAN_175_V120)
 		rc = cam_cpas_unregister_client(soc_private->cpas_handle[1]);
-		if (rc)
-			CAM_ERR(CAM_ISP, "CPAS1 unregistration failed rc=%d",
-				rc);
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPAS1 unregistration failed rc=%d",
+			rc);
 
 	rc = cam_vfe_release_platform_resource(soc_info);
 	if (rc < 0)
@@ -240,11 +240,11 @@
 	if (!rc && soc_private->cpas_version == CAM_CPAS_TITAN_175_V120)
 		rc = cam_cpas_start(soc_private->cpas_handle[1], &ahb_vote,
 			&axi_vote);
-		if (rc) {
-			CAM_ERR(CAM_ISP, "Error! CPAS1 start failed rc=%d", rc);
-			rc = -EFAULT;
-			goto end;
-		}
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! CPAS1 start failed rc=%d", rc);
+		rc = -EFAULT;
+		goto end;
+	}
 
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
 		CAM_TURBO_VOTE, true);
@@ -338,10 +338,10 @@
 
 	if (!rc && soc_private->cpas_version == CAM_CPAS_TITAN_175_V120)
 		rc = cam_cpas_stop(soc_private->cpas_handle[1]);
-		if (rc) {
-			CAM_ERR(CAM_ISP, "Error! CPAS stop failed rc=%d", rc);
-			return rc;
-		}
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! CPAS stop failed rc=%d", rc);
+		return rc;
+	}
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 37afbd2..fe3481c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
-/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/ratelimit.h>
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
index 5602e7c..9ad5c40 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
@@ -968,7 +968,7 @@
 			rsrc_data->height = 0;
 			rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE;
 			rsrc_data->pack_fmt = 0x0;
-			rsrc_data->en_cfg = 0x3;
+			rsrc_data->en_cfg = (0x1 << 16) | 0x1;
 			break;
 		case CAM_FORMAT_PLAIN8:
 			rsrc_data->en_cfg = 0x1;
@@ -985,7 +985,7 @@
 			rsrc_data->height = 0;
 			rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE;
 			rsrc_data->pack_fmt = 0x0;
-			rsrc_data->en_cfg = 0x3;
+			rsrc_data->en_cfg = (0x1 << 16) | 0x1;
 			break;
 		case CAM_FORMAT_PLAIN64:
 			rsrc_data->en_cfg = 0x1;
@@ -1083,18 +1083,24 @@
 			return -EINVAL;
 		}
 		rsrc_data->en_cfg = 0x1;
-	} else if (rsrc_data->index > 11 && rsrc_data->index < 21) {
-		/* WM 12-20 stats */
+	} else if (rsrc_data->index == 20) {
+		/* WM 20 stats BAF */
 		rsrc_data->width = 0;
 		rsrc_data->height = 0;
 		rsrc_data->stride = 1;
-		rsrc_data->en_cfg = 0x3;
+		rsrc_data->en_cfg = (0x2 << 16) | 0x1;
+	} else if (rsrc_data->index > 11 && rsrc_data->index < 20) {
+		/* WM 12-19 stats */
+		rsrc_data->width = 0;
+		rsrc_data->height = 0;
+		rsrc_data->stride = 1;
+		rsrc_data->en_cfg = (0x1 << 16) | 0x1;
 	} else if (rsrc_data->index == 11 || rsrc_data->index == 21) {
 		/* WM 21/11 PDAF/2PD */
 		rsrc_data->width = 0;
 		rsrc_data->height = 0;
 		rsrc_data->stride = 1;
-		rsrc_data->en_cfg = 0x3;
+		rsrc_data->en_cfg = (0x1 << 16) | 0x1;
 		if (vfe_out_res_id == CAM_VFE_BUS_VER3_VFE_OUT_PDAF)
 			/* LSB aligned */
 			rsrc_data->pack_fmt |= 0x10;
@@ -1111,7 +1117,7 @@
 			rsrc_data->width = 0;
 			rsrc_data->height = 0;
 			rsrc_data->stride = 1;
-			rsrc_data->en_cfg = 0x3;
+			rsrc_data->en_cfg = (0x1 << 16) | 0x1;
 			/* LSB aligned */
 			rsrc_data->pack_fmt |= 0x10;
 			break;
@@ -2885,7 +2891,8 @@
 	}
 
 	// no clock gating at bus input
-	cam_io_w_mb(0xFFFFF, bus_priv->common_data.mem_base +
+	CAM_INFO(CAM_ISP, "Overriding clock gating at bus input");
+	cam_io_w_mb(0x3FFFFFF, bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->cgc_ovd);
 
 	// BUS_WR_TEST_BUS_CTRL
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 7c5d13f..149d45e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -327,7 +327,7 @@
 	for (wm_idx = 0; wm_idx <= 23; wm_idx++) {
 		offset = 0x2214 + 0x100 * wm_idx;
 		CAM_INFO(CAM_ISP,
-			"BUS_WM%u offset 0x%x val 0x%x offset 0x%x val 0x%x",
+			"BUS_WM%u offset 0x%x val 0x%x offset 0x%x val 0x%x offset 0x%x val 0x%x offset 0x%x val 0x%x",
 			wm_idx, offset,
 			cam_io_r_mb(camif_priv->mem_base + offset),
 			offset + 4, cam_io_r_mb(camif_priv->mem_base +
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
index c165115..97df245 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
@@ -18,6 +18,7 @@
 #include "cam_cpas_api.h"
 
 #define CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX 2
+#define CAM_VFE_CAMIF_VER3_CORE_CFG_0_DEFAULT 0x2800
 
 struct cam_vfe_mux_camif_ver3_data {
 	void __iomem                                *mem_base;
@@ -256,7 +257,11 @@
 	cam_io_w_mb(val,
 		rsrc_data->mem_base + rsrc_data->camif_reg->module_cfg);
 	CAM_DBG(CAM_ISP, "write module_cfg val = 0x%x", val);
-	val = 0x0;
+
+	val = cam_io_r_mb(rsrc_data->mem_base +
+		rsrc_data->camif_reg->module_cfg);
+
+	val |= CAM_VFE_CAMIF_VER3_CORE_CFG_0_DEFAULT;
 
 	/* AF stitching by hw disabled by default
 	 * PP CAMIF currently operates only in offline mode
@@ -274,8 +279,8 @@
 	if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
 		val |= (1 << rsrc_data->reg_data->pp_extern_reg_update_shift);
 
-	cam_io_w_mb(val,
-		rsrc_data->mem_base + rsrc_data->common_reg->core_cfg_0);
+	cam_io_w_mb(val, rsrc_data->mem_base +
+		rsrc_data->common_reg->core_cfg_0);
 
 	/* epoch config */
 	switch (soc_private->cpas_version) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c
index b69de65..3a4036b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_fe_ver1.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -381,11 +381,11 @@
 		CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val);
 	}
 
-	cam_cpas_reg_read((uint32_t)soc_private->cpas_handle,
+	cam_cpas_reg_read(soc_private->cpas_handle[0],
 		CAM_CPAS_REG_CAMNOC, 0x420, true, &val);
 	CAM_INFO(CAM_ISP, "IFE02_MAXWR_LOW offset 0x420 val 0x%x", val);
 
-	cam_cpas_reg_read((uint32_t)soc_private->cpas_handle,
+	cam_cpas_reg_read(soc_private->cpas_handle[0],
 		CAM_CPAS_REG_CAMNOC, 0x820, true, &val);
 	CAM_INFO(CAM_ISP, "IFE13_MAXWR_LOW offset 0x820 val 0x%x", val);
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
index 2c82f2d..5ef1cd3 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
@@ -3,6 +3,7 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -I$(src)
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_core.o\
 				cam_req_mgr_dev.o \
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index 382e364..e6cff40 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -995,7 +995,7 @@
 				CAM_ERR(CAM_MEM,
 					"Failed, dmabuf=%pK, kmdvaddr=%pK",
 					tbl.bufq[idx].dma_buf,
-					tbl.bufq[idx].kmdvaddr);
+					(void *) tbl.bufq[idx].kmdvaddr);
 		}
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 864796a..1275504 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -2786,7 +2786,7 @@
 
 	if (sched_req->req_id <= link->last_flush_id) {
 		CAM_INFO(CAM_CRM,
-			"request %d is flushed, last_flush_id to flush %lld",
+			"request %lld is flushed, last_flush_id to flush %d",
 			sched_req->req_id, link->last_flush_id);
 		rc = -EINVAL;
 		goto end;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index bcfb2f5..8e1acae 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -673,7 +673,7 @@
 	}
 
 	if (cam_mem_put_cpu_buf(config.packet_handle))
-		CAM_WARN(CAM_ACTUATOR, "Fail to put cmd buffer: 0x%x",
+		CAM_WARN(CAM_ACTUATOR, "Fail to put cmd buffer: 0x%llx",
 			config.packet_handle);
 
 	return rc;
@@ -684,7 +684,7 @@
 			cmd_desc[i].mem_handle);
 rel_pkt_buf:
 	if (cam_mem_put_cpu_buf(config.packet_handle))
-		CAM_WARN(CAM_ACTUATOR, "Fail to put cmd buffer: 0x%x",
+		CAM_WARN(CAM_ACTUATOR, "Fail to put cmd buffer: 0x%llx",
 			config.packet_handle);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
index bf4be8d..1be58be 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
@@ -6,5 +6,6 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -I$(src)
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_cci_dev.o cam_cci_core.o cam_cci_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 456afa3..bd41660 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -242,7 +242,7 @@
 
 rel_pkt_buf:
 	if (cam_mem_put_cpu_buf((int32_t) cfg_dev->packet_handle))
-		CAM_WARN(CAM_CSIPHY, "Failed to put packet Mem address: 0x%x",
+		CAM_WARN(CAM_CSIPHY, "Failed to put packet Mem address: 0x%llx",
 			 cfg_dev->packet_handle);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index 8f6b6e2..f71f53d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -1358,7 +1358,7 @@
 	}
 
 	if (cam_mem_put_cpu_buf(dev_config.packet_handle))
-		CAM_WARN(CAM_EEPROM, "Put cpu buffer failed : 0x%x",
+		CAM_WARN(CAM_EEPROM, "Put cpu buffer failed : 0x%llx",
 			dev_config.packet_handle);
 
 	return rc;
@@ -1378,7 +1378,7 @@
 	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
 release_buf:
 	if (cam_mem_put_cpu_buf(dev_config.packet_handle))
-		CAM_WARN(CAM_EEPROM, "Put cpu buffer failed : 0x%x",
+		CAM_WARN(CAM_EEPROM, "Put cpu buffer failed : 0x%llx",
 			dev_config.packet_handle);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index e7eca3e..1e45967 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -672,7 +672,7 @@
 	cam_ois_power_down(o_ctrl);
 rel_pkt:
 	if (cam_mem_put_cpu_buf(dev_config.packet_handle))
-		CAM_WARN(CAM_OIS, "Fail in put buffer: 0x%x",
+		CAM_WARN(CAM_OIS, "Fail in put buffer: 0x%llx",
 			dev_config.packet_handle);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index 60c76c7..123b7b3f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -143,7 +143,7 @@
 		csl_packet->header.request_id <= s_ctrl->last_flush_req
 		&& s_ctrl->last_flush_req != 0) {
 		CAM_ERR(CAM_SENSOR,
-			"reject request %lld, last request to flush %lld",
+			"reject request %lld, last request to flush %u",
 			csl_packet->header.request_id, s_ctrl->last_flush_req);
 		rc = -EINVAL;
 		goto rel_pkt_buf;
@@ -254,7 +254,7 @@
 
 rel_pkt_buf:
 	if (cam_mem_put_cpu_buf(config.packet_handle))
-		CAM_WARN(CAM_SENSOR, "Failed in put the buffer: 0x%x",
+		CAM_WARN(CAM_SENSOR, "Failed in put the buffer: 0x%llx",
 			config.packet_handle);
 
 	return rc;
@@ -515,7 +515,7 @@
 	}
 
 	if (cam_mem_put_cpu_buf(handle))
-		CAM_WARN(CAM_SENSOR, "Failed to put the command Buffer: 0x%x",
+		CAM_WARN(CAM_SENSOR, "Failed to put the command Buffer: 0x%llx",
 			handle);
 
 	return rc;
@@ -526,7 +526,7 @@
 			cmd_desc[i].mem_handle);
 rel_pkt_buf:
 	if (cam_mem_put_cpu_buf(handle))
-		CAM_WARN(CAM_SENSOR, "Failed to put the command Buffer: 0x%x",
+		CAM_WARN(CAM_SENSOR, "Failed to put the command Buffer: 0x%llx",
 			handle);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
index 0effea5..310b0a5 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
@@ -7,5 +7,6 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -I$(src)
 
 obj-$(CONFIG_SPECTRA_CAMERA) +=  cam_sensor_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sync/Makefile b/drivers/media/platform/msm/camera/cam_sync/Makefile
index a86f72e..410f3a7 100644
--- a/drivers/media/platform/msm/camera/cam_sync/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sync/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -I$(src)
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync.o cam_sync_util.o
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_trace.h b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
index eb222ba..695a8fb 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #if !defined(_CAM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
@@ -11,7 +11,7 @@
 #undef TRACE_INCLUDE_PATH
 #define TRACE_INCLUDE_PATH .
 #undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE cam_trace
+#define TRACE_INCLUDE_FILE ../../drivers/media/platform/msm/camera/cam_utils/cam_trace
 
 #include <linux/tracepoint.h>
 #include <media/cam_req_mgr.h>
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.c b/drivers/media/platform/msm/cvp/cvp_hfi.c
index a8754ee..c3b2eac 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.c
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.c
@@ -2430,8 +2430,7 @@
 
 	__set_default_sys_properties(device);
 
-	if (call_hfi_pkt_op(dev, session_init, &pkt,
-			s, session_type, codec_type)) {
+	if (call_hfi_pkt_op(dev, session_init, &pkt, s)) {
 		dprintk(CVP_ERR, "session_init: failed to create packet\n");
 		goto err_session_init_fail;
 	}
@@ -4308,8 +4307,6 @@
 		goto fail_venus_power_on;
 	}
 
-	/*FIXME: proceed if TZ CVP PIL works */
-	return 0;
 	if ((!device->res->use_non_secure_pil && !device->res->firmware_base)
 			|| device->res->use_non_secure_pil) {
 		if (!device->resources.fw.cookie)
@@ -4365,7 +4362,7 @@
 	device->resources.fw.cookie = NULL;
 	__deinit_resources(device);
 
-	dprintk(CVP_PROF, "Firmware unloaded successfully\n");
+	dprintk(CVP_DBG, "Firmware unloaded successfully\n");
 }
 
 static int venus_hfi_get_fw_info(void *dev, struct hal_fw_info *fw_info)
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
index e545892..31d122a 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
@@ -933,12 +933,22 @@
 	u32 rg_property_data[1];
 };
 
+enum HFI_SESSION_TYPE {
+	HFI_SESSION_CV = 1,
+	HFI_SESSION_LRME,
+	HFI_SESSION_ODT,
+	HFI_SESSION_FD
+};
+
 struct hfi_cmd_sys_session_init_packet {
 	u32 size;
 	u32 packet_type;
 	u32 session_id;
-	u32 session_domain;
-	u32 session_codec;
+	u32 session_type;
+	u32 session_kmask;
+	u32 session_prio;
+	u32 is_secure;
+	u32 dsp_ac_mask;
 };
 
 struct hfi_cmd_sys_session_end_packet {
diff --git a/drivers/media/platform/msm/cvp/hfi_packetization.c b/drivers/media/platform/msm/cvp/hfi_packetization.c
index f984768..67306e1 100644
--- a/drivers/media/platform/msm/cvp/hfi_packetization.c
+++ b/drivers/media/platform/msm/cvp/hfi_packetization.c
@@ -407,8 +407,7 @@
 
 inline int cvp_create_pkt_cmd_sys_session_init(
 		struct hfi_cmd_sys_session_init_packet *pkt,
-		struct hal_session *session,
-		u32 session_domain, u32 session_codec)
+		struct hal_session *session)
 {
 	int rc = 0;
 
@@ -418,10 +417,11 @@
 	pkt->size = sizeof(struct hfi_cmd_sys_session_init_packet);
 	pkt->packet_type = HFI_CMD_SYS_SESSION_INIT;
 	pkt->session_id = hash32_ptr(session);
-	pkt->session_domain = cvp_get_hfi_domain(session_domain);
-	pkt->session_codec = cvp_get_hfi_codec(session_codec);
-	if (!pkt->session_codec)
-		return -EINVAL;
+	pkt->session_kmask = 0xFFFFFFFF;
+	pkt->session_type = HFI_SESSION_CV;
+	pkt->session_prio = 0;
+	pkt->is_secure = 0;
+	pkt->dsp_ac_mask = 0;
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/cvp/hfi_packetization.h b/drivers/media/platform/msm/cvp/hfi_packetization.h
index 1b68572..e13c1e5 100644
--- a/drivers/media/platform/msm/cvp/hfi_packetization.h
+++ b/drivers/media/platform/msm/cvp/hfi_packetization.h
@@ -40,8 +40,7 @@
 		struct hfi_cmd_sys_test_ssr_packet *pkt);
 	int (*session_init)(
 		struct hfi_cmd_sys_session_init_packet *pkt,
-		struct hal_session *session,
-		u32 session_domain, u32 session_codec);
+		struct hal_session *session);
 	int (*session_cmd)(struct cvp_hal_session_cmd_pkt *pkt,
 		int pkt_type, struct hal_session *session);
 	int (*session_set_buffers)(
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_clocks.c b/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
index e455469..b3a8c0d 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
@@ -205,8 +205,11 @@
 		dprintk(CVP_ERR, "%s Invalid args: %pK\n", __func__, core);
 		return -EINVAL;
 	}
-	hdev = core->device;
 
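+	/* bus voting is skipped unless devfreq based bus scaling is enabled */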
+	if (!core->resources.bus_devfreq_on)
+		return 0;
+
+	hdev = core->device;
 	vote_data = kzalloc(sizeof(struct cvp_bus_vote_data) *
 			MAX_SUPPORTED_INSTANCES, GFP_ATOMIC);
 	if (!vote_data) {
@@ -818,6 +821,9 @@
 		return -EINVAL;
 	}
 
+	if (!inst->core->resources.bus_devfreq_on)
+		return 0;
+
 	mutex_lock(&inst->registeredbufs.lock);
 	list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) {
 		if (temp->vvb.vb2_buf.type ==
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_debug.c b/drivers/media/platform/msm/cvp/msm_cvp_debug.c
index 8d5d7c5..2ab6d44 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_debug.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_debug.c
@@ -8,7 +8,7 @@
 #include "msm_cvp_debug.h"
 #include "cvp_hfi_api.h"
 
-int msm_cvp_debug = CVP_ERR | CVP_WARN;
+int msm_cvp_debug = CVP_ERR | CVP_WARN | CVP_DBG;
 EXPORT_SYMBOL(msm_cvp_debug);
 
 int msm_cvp_debug_out = CVP_OUT_PRINTK;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_platform.c b/drivers/media/platform/msm/cvp/msm_cvp_platform.c
index 2dd9b26..dfc2855 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_platform.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_platform.c
@@ -164,6 +164,10 @@
 		.key = "qcom,fw-cycles",
 		.value = 760000,
 	},
+	{
+		.key = "qcom,use-devfreq-scale-bus",
+		.value = 0,
+	},
 };
 
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c b/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
index e2262b0..ca47d00 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
@@ -763,6 +763,8 @@
 			"qcom,dcvs");
 	res->fw_cycles = find_key_value(platform_data,
 			"qcom,fw-cycles");
+	res->bus_devfreq_on = find_key_value(platform_data,
+			"qcom,use-devfreq-scale-bus");
 
 	res->csc_coeff_data = &platform_data->csc_data;
 
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_resources.h b/drivers/media/platform/msm/cvp/msm_cvp_resources.h
index 58c852b..4b4e149 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_resources.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_resources.h
@@ -192,6 +192,7 @@
 	struct msm_cvp_mem_cdsp mem_cdsp;
 	uint32_t vpu_ver;
 	uint32_t fw_cycles;
+	uint32_t bus_devfreq_on;
 };
 
 static inline bool is_iommu_present(struct msm_cvp_platform_resources *res)
diff --git a/drivers/media/platform/msm/synx/Kconfig b/drivers/media/platform/msm/synx/Kconfig
new file mode 100644
index 0000000..cb65194
--- /dev/null
+++ b/drivers/media/platform/msm/synx/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menuconfig MSM_GLOBAL_SYNX
+	bool "Qualcomm Technologies, Inc. Global Synchronization Framework"
+	depends on ARCH_QCOM
+	help
+	  Say Y here to enable the global synchronization framework for
+	  Qualcomm Technologies, Inc. chipsets.
+	  Enabling this adds support for global synchronization across
+	  heterogeneous cores.
+
diff --git a/drivers/media/platform/msm/synx/Makefile b/drivers/media/platform/msm/synx/Makefile
new file mode 100644
index 0000000..5a782e7
--- /dev/null
+++ b/drivers/media/platform/msm/synx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-$(CONFIG_SPECTRA_CAMERA) += -Idrivers/media/platform/msm/camera/cam_sync
+obj-$(CONFIG_MSM_GLOBAL_SYNX) += synx.o synx_util.o
diff --git a/drivers/media/platform/msm/synx/synx.c b/drivers/media/platform/msm/synx/synx.c
new file mode 100644
index 0000000..30e22f0
--- /dev/null
+++ b/drivers/media/platform/msm/synx/synx.c
@@ -0,0 +1,1476 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#define pr_fmt(fmt) "synx: " fmt
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_SPECTRA_CAMERA
+#include <cam_sync_api.h>
+#endif
+
+#include "synx_api.h"
+#include "synx_util.h"
+
+struct synx_device *synx_dev;
+
+void synx_external_callback(s32 sync_obj, int status, void *data)
+{
+	struct synx_table_row *row = NULL;
+	struct synx_external_data *bind_data = data;
+
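+	/* bind_data was allocated in synx_bind; consume and free it here */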
+	if (bind_data) {
+		row = synx_from_key(bind_data->synx_obj, bind_data->secure_key);
+		kfree(bind_data);
+	}
+
+	if (row) {
+		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		row->signaling_id = sync_obj;
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+
+		pr_debug("signaling synx 0x%x from external callback %d\n",
+			row->synx_obj, sync_obj);
+		synx_signal(row->synx_obj, status);
+	} else {
+		pr_err("invalid callback from sync external obj %d\n",
+			sync_obj);
+	}
+}
+
+bool synx_fence_enable_signaling(struct dma_fence *fence)
+{
+	return true;
+}
+
+const char *synx_fence_driver_name(struct dma_fence *fence)
+{
+	return "Global Synx driver";
+}
+
+void synx_fence_release(struct dma_fence *fence)
+{
+	struct synx_table_row *row = NULL;
+
+	pr_debug("Enter %s\n", __func__);
+
+	row = synx_from_fence(fence);
+	if (row) {
+		/* metadata (row) will be cleared in the deinit function */
+		synx_deinit_object(row);
+	}
+
+	pr_debug("Exit %s\n", __func__);
+}
+
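+/*
+ * dma_fence backend for synx objects: signaling is always enabled,
+ * and release routes through synx_fence_release to reclaim the row
+ */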
+static struct dma_fence_ops synx_fence_ops = {
+	.wait = dma_fence_default_wait,
+	.enable_signaling = synx_fence_enable_signaling,
+	.get_driver_name = synx_fence_driver_name,
+	.get_timeline_name = synx_fence_driver_name,
+	.release = synx_fence_release,
+};
+
+int synx_create(s32 *synx_obj, const char *name)
+{
+	int rc;
+	long idx;
+	bool bit;
+	s32 id;
+	struct synx_table_row *row = NULL;
+
+	pr_debug("Enter %s\n", __func__);
+
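+	/* reserve a free table slot; test_and_set_bit guards against races */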
+	do {
+		idx = find_first_zero_bit(synx_dev->bitmap, SYNX_MAX_OBJS);
+		if (idx >= SYNX_MAX_OBJS)
+			return -ENOMEM;
+		pr_debug("index location available at idx: %ld\n", idx);
+		bit = test_and_set_bit(idx, synx_dev->bitmap);
+	} while (bit);
+
+	/* global synx id */
+	id = synx_create_handle(synx_dev->synx_table + idx);
+	rc = synx_init_object(synx_dev->synx_table,
+			idx, id, name, &synx_fence_ops);
+	if (rc) {
+		pr_err("unable to init row at idx = %ld\n", idx);
+		clear_bit(idx, synx_dev->bitmap);
+		return -EINVAL;
+	}
+
+	row = synx_dev->synx_table + idx;
+	rc = synx_activate(row);
+	if (rc) {
+		pr_err("unable to activate row at idx = %ld\n", idx);
+		synx_deinit_object(row);
+		return -EINVAL;
+	}
+
+	*synx_obj = row->synx_obj;
+
+	pr_debug("row: synx id: 0x%x, index: %ld\n",
+		row->synx_obj, row->index);
+	pr_debug("Exit %s\n", __func__);
+
+	return rc;
+}
+
+int synx_register_callback(s32 synx_obj,
+	void *userdata, synx_callback cb_func)
+{
+	u32 state = SYNX_STATE_INVALID;
+	struct synx_callback_info *synx_cb;
+	struct synx_callback_info *temp_cb_info;
+	struct synx_table_row *row = NULL;
+
+	row = synx_from_handle(synx_obj);
+	if (!row || !cb_func)
+		return -EINVAL;
+
+	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	state = synx_status_locked(row);
+	/* do not register if callback registered earlier */
+	list_for_each_entry(temp_cb_info, &row->callback_list, list) {
+		if (temp_cb_info->callback_func == cb_func &&
+			temp_cb_info->cb_data == userdata) {
+			pr_err("duplicate registration for synx 0x%x\n",
+				row->synx_obj);
+			spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+			return -EALREADY;
+		}
+	}
+
+	synx_cb = kzalloc(sizeof(*synx_cb), GFP_ATOMIC);
+	if (!synx_cb) {
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		return -ENOMEM;
+	}
+
+	synx_cb->callback_func = cb_func;
+	synx_cb->cb_data = userdata;
+	synx_cb->synx_obj = synx_obj;
+	INIT_WORK(&synx_cb->cb_dispatch_work, synx_util_cb_dispatch);
+
+	/* trigger callback if synx object is already in SIGNALED state */
+	if (state == SYNX_STATE_SIGNALED_SUCCESS ||
+		state == SYNX_STATE_SIGNALED_ERROR) {
+		synx_cb->status = state;
+		pr_debug("callback triggered for synx 0x%x\n",
+			synx_cb->synx_obj);
+		queue_work(synx_dev->work_queue,
+			&synx_cb->cb_dispatch_work);
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		return 0;
+	}
+
+	list_add_tail(&synx_cb->list, &row->callback_list);
+	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	return 0;
+}
+
+int synx_deregister_callback(s32 synx_obj,
+	synx_callback cb_func,
+	void *userdata,
+	synx_callback cancel_cb_func)
+{
+	u32 state = SYNX_STATE_INVALID;
+	struct synx_table_row *row = NULL;
+	struct synx_callback_info *synx_cb, *temp;
+
+	row = synx_from_handle(synx_obj);
+	if (!row) {
+		pr_err("invalid synx 0x%x\n", synx_obj);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	state = synx_status_locked(row);
+	pr_debug("de-registering callback for synx 0x%x\n",
+		row->synx_obj);
+	list_for_each_entry_safe(synx_cb, temp, &row->callback_list, list) {
+		if (synx_cb->callback_func == cb_func &&
+			synx_cb->cb_data == userdata) {
+			list_del_init(&synx_cb->list);
+			if (cancel_cb_func) {
+				synx_cb->status = SYNX_CALLBACK_RESULT_CANCELED;
+				synx_cb->callback_func = cancel_cb_func;
+				queue_work(synx_dev->work_queue,
+					&synx_cb->cb_dispatch_work);
+			} else {
+				kfree(synx_cb);
+			}
+		}
+	}
+
+	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	return 0;
+}
+
+int synx_signal(s32 synx_obj, u32 status)
+{
+	int rc;
+	u32 i = 0;
+	u32 idx = 0;
+	u32 type;
+	s32 sync_id;
+	struct synx_table_row *row = NULL;
+	struct synx_external_data *data = NULL;
+	struct synx_bind_desc bind_descs[SYNX_MAX_NUM_BINDINGS];
+	struct bind_operations *bind_ops = NULL;
+
+	pr_debug("Enter %s\n", __func__);
+
+	row = synx_from_handle(synx_obj);
+	if (!row) {
+		pr_err("invalid synx: 0x%x\n", synx_obj);
+		return -EINVAL;
+	}
+
+	if (status != SYNX_STATE_SIGNALED_SUCCESS &&
+		status != SYNX_STATE_SIGNALED_ERROR) {
+		pr_err("signaling with undefined status = %d\n",
+			status);
+		return -EINVAL;
+	}
+
+	if (is_merged_synx(row)) {
+		pr_err("signaling a composite synx object 0x%x\n",
+			synx_obj);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	if (synx_status_locked(row) != SYNX_STATE_ACTIVE) {
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		pr_err("object already signaled synx = 0x%x\n",
+			synx_obj);
+		return -EALREADY;
+	}
+
+	/* set fence error to model {signal w/ error} */
+	if (status == SYNX_STATE_SIGNALED_ERROR)
+		dma_fence_set_error(row->fence, -EINVAL);
+
+	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	rc = dma_fence_signal(row->fence);
+	if (rc < 0) {
+		pr_err("unable to signal synx 0x%x, err: %d\n",
+			row->synx_obj, rc);
+		if (status != SYNX_STATE_SIGNALED_ERROR) {
+			dma_fence_set_error(row->fence, -EINVAL);
+			status = SYNX_STATE_SIGNALED_ERROR;
+		}
+	}
+
+	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	synx_callback_dispatch(row);
+
+	/*
+	 * signal the external bound sync obj/s even if fence signal fails,
+	 * w/ error signal state (set above) to prevent deadlock
+	 */
+	if (row->num_bound_synxs > 0) {
+		memset(bind_descs, 0,
+			sizeof(struct synx_bind_desc) * SYNX_MAX_NUM_BINDINGS);
+		for (i = 0; i < row->num_bound_synxs; i++) {
+			/* signal invoked by external sync obj */
+			if (row->signaling_id ==
+				row->bound_synxs[i].external_desc.id[0]) {
+				pr_debug("signaling_bound_sync: %d, skipping\n",
+					row->signaling_id);
+				memset(&row->bound_synxs[i], 0,
+					sizeof(struct synx_bind_desc));
+				continue;
+			}
+			memcpy(&bind_descs[idx++],
+				&row->bound_synxs[i],
+				sizeof(struct synx_bind_desc));
+			/* clear the memory, it's been backed up above */
+			memset(&row->bound_synxs[i], 0,
+				sizeof(struct synx_bind_desc));
+		}
+		row->num_bound_synxs = 0;
+	}
+	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+
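+	/* signal the backed-up external bindings with the row lock dropped */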
+	for (i = 0; i < idx; i++) {
+		type = bind_descs[i].external_desc.type;
+		sync_id = bind_descs[i].external_desc.id[0];
+		data = bind_descs[i].external_data;
+		if (is_valid_type(type)) {
+			bind_ops = &synx_dev->bind_vtbl[type];
+			if (!bind_ops->deregister_callback ||
+				!bind_ops->signal) {
+				pr_err("invalid bind ops for %u\n", type);
+				kfree(data);
+				continue;
+			}
+			/*
+			 * we are already signaled, so we don't want to
+			 * be signaled recursively
+			 */
+			bind_ops->deregister_callback(synx_external_callback,
+				data, sync_id);
+			pr_debug("signaling external sync: %d, status: %u\n",
+				sync_id, status);
+			bind_ops->signal(sync_id, status);
+		} else {
+			pr_warn("unimplemented external type: %u\n", type);
+		}
+
+		/*
+		 * release the memory allocated for external data.
+		 * It is safe to release this memory as external cb
+		 * has been already deregistered before this.
+		 */
+		kfree(data);
+	}
+
+	pr_debug("Exit %s\n", __func__);
+	return rc;
+}
+
+int synx_merge(s32 *synx_objs, u32 num_objs, s32 *synx_merged)
+{
+	int rc;
+	long idx = 0;
+	bool bit;
+	s32 id;
+	u32 count = 0;
+	struct dma_fence **fences = NULL;
+	struct synx_table_row *row = NULL;
+
+	pr_debug("Enter %s\n", __func__);
+
+	if (!synx_objs || !synx_merged) {
+		pr_err("invalid pointer(s)\n");
+		return -EINVAL;
+	}
+
+	rc = synx_util_validate_merge(synx_objs, num_objs, &fences, &count);
+	if (rc < 0) {
+		pr_err("validation failed, merge not allowed\n");
+		rc = -EINVAL;
+		goto free;
+	}
+
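+	/* reserve a free table slot for the merged object */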
+	do {
+		idx = find_first_zero_bit(synx_dev->bitmap, SYNX_MAX_OBJS);
+		if (idx >= SYNX_MAX_OBJS) {
+			rc = -ENOMEM;
+			goto free;
+		}
+		bit = test_and_set_bit(idx, synx_dev->bitmap);
+	} while (bit);
+
+	/* global synx id */
+	id = synx_create_handle(synx_dev->synx_table + idx);
+
+	rc = synx_init_group_object(synx_dev->synx_table,
+			idx, id, fences, count);
+	if (rc < 0) {
+		pr_err("unable to init row at idx = %ld\n", idx);
+		goto clear;
+	}
+
+	row = synx_dev->synx_table + idx;
+	rc = synx_activate(row);
+	if (rc) {
+		pr_err("unable to activate row at idx = %ld, synx 0x%x\n",
+			idx, id);
+		goto clear;
+	}
+
+	*synx_merged = row->synx_obj;
+
+	pr_debug("row (merged): synx 0x%x, index: %ld\n",
+		row->synx_obj, row->index);
+	pr_debug("Exit %s\n", __func__);
+
+	return 0;
+
+clear:
+	clear_bit(idx, synx_dev->bitmap);
+free:
+	synx_merge_error(synx_objs, count);
+	if (num_objs <= count)
+		kfree(fences);
+	return rc;
+}
+
+int synx_release(s32 synx_obj)
+{
+	s32 idx;
+	struct dma_fence *fence = NULL;
+	struct synx_table_row *row = NULL;
+
+	pr_debug("Enter %s\n", __func__);
+
+	row = synx_from_handle(synx_obj);
+	if (!row) {
+		pr_err("invalid synx: 0x%x\n", synx_obj);
+		return -EINVAL;
+	}
+
+	/*
+	 * metadata might be cleared after invoking dma_fence_put
+	 * (definitely for merged synx on invoking deinit),
+	 * so be careful while accessing the metadata
+	 */
+	fence = row->fence;
+	idx = row->index;
+	spin_lock_bh(&synx_dev->row_spinlocks[idx]);
+	if (synx_status_locked(row) == SYNX_STATE_ACTIVE) {
+		pr_err("need to signal before release synx = 0x%x\n",
+			synx_obj);
+		spin_unlock_bh(&synx_dev->row_spinlocks[idx]);
+		return -EINVAL;
+	}
+
+	/*
+	 * we need to clear the metadata for a merged synx obj in synx_release
+	 * itself, as it does not invoke the synx_fence_release function.
+	 * See synx_import for the full explanation.
+	 */
+	if (is_merged_synx(row))
+		synx_deinit_object(row);
+
+	/* do not reference fence and row in the function after this */
+	dma_fence_put(fence);
+	spin_unlock_bh(&synx_dev->row_spinlocks[idx]);
+	pr_debug("Exit %s\n", __func__);
+
+	return 0;
+}
+
+int synx_wait(s32 synx_obj, u64 timeout_ms)
+{
+	unsigned long timeleft;
+	struct synx_table_row *row = NULL;
+
+	pr_debug("Enter %s\n", __func__);
+
+	row = synx_from_handle(synx_obj);
+	if (!row) {
+		pr_err("invalid synx: 0x%x\n", synx_obj);
+		return -EINVAL;
+	}
+
+	timeleft = dma_fence_wait_timeout(row->fence, (bool) 0,
+					msecs_to_jiffies(timeout_ms));
+	if (timeleft <= 0) {
+		pr_err("timed out for synx obj 0x%x\n", synx_obj);
+		return -ETIMEDOUT;
+	}
+
+	if (synx_status(row) != SYNX_STATE_SIGNALED_SUCCESS) {
+		pr_err("signaled error on synx obj 0x%x\n", synx_obj);
+		return -EINVAL;
+	}
+
+	pr_debug("Exit %s\n", __func__);
+
+	return 0;
+}
+
+int synx_bind(s32 synx_obj, struct synx_external_desc external_sync)
+{
+	int rc = 0;
+	u32 i = 0;
+	struct synx_table_row *row = NULL;
+	struct synx_external_data *data = NULL;
+	struct bind_operations *bind_ops = NULL;
+
+	pr_debug("Enter %s\n", __func__);
+
+	row = (struct synx_table_row *)synx_from_handle(synx_obj);
+	if (!row) {
+		pr_err("invalid synx: 0x%x\n", synx_obj);
+		return -EINVAL;
+	}
+
+	if (is_merged_synx(row)) {
+		pr_err("cannot bind to merged fence: 0x%x\n", synx_obj);
+		return -EINVAL;
+	}
+
+	if (!is_valid_type(external_sync.type)) {
+		pr_err("invalid external sync object\n");
+		return -EINVAL;
+	}
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	if (synx_status_locked(row) != SYNX_STATE_ACTIVE) {
+		pr_err("bind to non-active synx is prohibited 0x%x\n",
+			synx_obj);
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		kfree(data);
+		return -EINVAL;
+	}
+
+	if (row->num_bound_synxs >= SYNX_MAX_NUM_BINDINGS) {
+		pr_err("max number of bindings reached for synx_objs 0x%x\n",
+			synx_obj);
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		kfree(data);
+		return -ENOMEM;
+	}
+
+	/* don't bind if the external sync obj is already bound */
+	for (i = 0; i < row->num_bound_synxs; i++) {
+		if (external_sync.id[0] ==
+			row->bound_synxs[i].external_desc.id[0]) {
+			pr_err("duplicate binding for external sync %d\n",
+				external_sync.id[0]);
+			spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+			kfree(data);
+			return -EALREADY;
+		}
+	}
+
+	bind_ops = &synx_dev->bind_vtbl[external_sync.type];
+	if (!bind_ops->register_callback) {
+		pr_err("invalid bind register for %u\n",
+			external_sync.type);
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		kfree(data);
+		return -EINVAL;
+	}
+
+	/* data passed to external callback */
+	data->synx_obj = row->synx_obj;
+	data->secure_key = synx_generate_secure_key(row);
+
+	rc = bind_ops->register_callback(synx_external_callback,
+			data, external_sync.id[0]);
+	if (rc < 0) {
+		pr_err("callback registration failed for %d\n",
+			external_sync.id[0]);
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		kfree(data);
+		return rc;
+	}
+
+	memcpy(&row->bound_synxs[row->num_bound_synxs],
+		   &external_sync, sizeof(struct synx_external_desc));
+	row->bound_synxs[row->num_bound_synxs].external_data = data;
+	row->num_bound_synxs = row->num_bound_synxs + 1;
+	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	pr_debug("added external sync %d to bindings of 0x%x\n",
+		external_sync.id[0], synx_obj);
+
+	pr_debug("Exit %s\n", __func__);
+	return rc;
+}
+
+int synx_get_status(s32 synx_obj)
+{
+	struct synx_table_row *row = NULL;
+
+	pr_debug("getting the status for synx 0x%x\n", synx_obj);
+
+	row = (struct synx_table_row *)synx_from_handle(synx_obj);
+	if (!row) {
+		pr_err("invalid synx: 0x%x\n", synx_obj);
+		return SYNX_STATE_INVALID;
+	}
+
+	return synx_status(row);
+}
+
+int synx_addrefcount(s32 synx_obj, s32 count)
+{
+	struct synx_table_row *row = NULL;
+
+	row = synx_from_handle(synx_obj);
+	if (!row) {
+		pr_err("invalid synx: 0x%x\n", synx_obj);
+		return -EINVAL;
+	}
+
+	if ((count < 0) || (count > SYNX_MAX_REF_COUNTS)) {
+		pr_err("invalid count for synx: 0x%x\n",
+			synx_obj);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	while (count--)
+		dma_fence_get(row->fence);
+	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	return 0;
+}
+
+int synx_import(s32 synx_obj, u32 secure_key, s32 *new_synx_obj)
+{
+	bool bit;
+	s32 id;
+	long idx = 0;
+	struct synx_table_row *row = NULL;
+	struct synx_table_row *new_row = NULL;
+
+	pr_debug("Enter %s\n", __func__);
+
+	if (!synx_obj)
+		return -EINVAL;
+
+	row = synx_from_key(synx_obj, secure_key);
+	if (!row)
+		return -EINVAL;
+
+	/*
+	 * The reason for separate metadata (for merged synx) is that
+	 * the dma fence array has its own release func registered with
+	 * the dma fence ops, which doesn't invoke the release func
+	 * registered by the framework to clear metadata when all refs
+	 * are released. Hence we need to clear the metadata for a merged
+	 * synx obj in synx_release itself. But this creates a problem if
+	 * the synx obj has been exported. Thus we need separate metadata
+	 * structures even though they represent the same synx obj.
+	 * Note, only the metadata is released; the fence reference
+	 * count is still decremented.
+	 */
+	if (is_merged_synx(row)) {
+		do {
+			idx = find_first_zero_bit(synx_dev->bitmap,
+					SYNX_MAX_OBJS);
+			if (idx >= SYNX_MAX_OBJS)
+				return -ENOMEM;
+			bit = test_and_set_bit(idx, synx_dev->bitmap);
+		} while (bit);
+
+		new_row = synx_dev->synx_table + idx;
+		/* new global synx id */
+		id = synx_create_handle(new_row);
+
+		/* both metadata entries point to the same dma fence */
+		new_row->fence = row->fence;
+		new_row->index = idx;
+		new_row->synx_obj = id;
+	} else {
+		/* new global synx id; imported synx points to the same metadata */
+		id = synx_create_handle(row);
+	}
+
+	*new_synx_obj = id;
+	pr_debug("Exit %s\n", __func__);
+
+	return 0;
+}
+
+int synx_export(s32 synx_obj, u32 *key)
+{
+	struct synx_table_row *row = NULL;
+
+	row = synx_from_handle(synx_obj);
+	if (!row)
+		return -EINVAL;
+
+	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+	*key = synx_generate_secure_key(row);
+
+	/*
+	 * Take an extra fence reference to make sure the synx is not lost
+	 * if the process dies or the synx is released before any other
+	 * process gets a chance to import it. The assumption is that an
+	 * import will match this and account for the extra reference.
+	 * Otherwise, this will be a dangling reference and needs to be
+	 * garbage collected.
+	 */
+	dma_fence_get(row->fence);
+	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	return 0;
+}
+
+
+static int synx_handle_create(struct synx_private_ioctl_arg *k_ioctl)
+{
+	struct synx_info synx_create_info;
+	int result;
+
+	if (k_ioctl->size != sizeof(synx_create_info))
+		return -EINVAL;
+
+	if (copy_from_user(&synx_create_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	result = synx_create(&synx_create_info.synx_obj,
+		synx_create_info.name);
+
+	if (!result)
+		if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			&synx_create_info,
+			k_ioctl->size))
+			return -EFAULT;
+
+	return result;
+}
+
+static int synx_handle_getstatus(struct synx_private_ioctl_arg *k_ioctl)
+{
+	struct synx_signal synx_status;
+
+	if (k_ioctl->size != sizeof(synx_status))
+		return -EINVAL;
+
+	if (copy_from_user(&synx_status,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	synx_status.synx_state = synx_get_status(synx_status.synx_obj);
+
+	if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		&synx_status,
+		k_ioctl->size))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int synx_handle_import(struct synx_private_ioctl_arg *k_ioctl)
+{
+	struct synx_id_info id_info;
+
+	if (k_ioctl->size != sizeof(id_info))
+		return -EINVAL;
+
+	if (copy_from_user(&id_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	if (synx_import(id_info.synx_obj, id_info.secure_key,
+		&id_info.new_synx_obj))
+		return -EINVAL;
+
+	if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		&id_info,
+		k_ioctl->size))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int synx_handle_export(struct synx_private_ioctl_arg *k_ioctl)
+{
+	struct synx_id_info id_info;
+
+	if (k_ioctl->size != sizeof(id_info))
+		return -EINVAL;
+
+	if (copy_from_user(&id_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	if (synx_export(id_info.synx_obj, &id_info.secure_key))
+		return -EINVAL;
+
+	if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		&id_info,
+		k_ioctl->size))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int synx_handle_signal(struct synx_private_ioctl_arg *k_ioctl)
+{
+	struct synx_signal synx_signal_info;
+
+	if (k_ioctl->size != sizeof(synx_signal_info))
+		return -EINVAL;
+
+	if (copy_from_user(&synx_signal_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	return synx_signal(synx_signal_info.synx_obj,
+		synx_signal_info.synx_state);
+}
+
+static int synx_handle_merge(struct synx_private_ioctl_arg *k_ioctl)
+{
+	struct synx_merge synx_merge_info;
+	s32 *synx_objs;
+	u32 num_objs;
+	u32 size;
+	int result;
+
+	if (k_ioctl->size != sizeof(synx_merge_info))
+		return -EINVAL;
+
+	if (copy_from_user(&synx_merge_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	if (synx_merge_info.num_objs >= SYNX_MAX_OBJS)
+		return -EINVAL;
+
+	size = sizeof(u32) * synx_merge_info.num_objs;
+	synx_objs = kcalloc(synx_merge_info.num_objs,
+					sizeof(*synx_objs), GFP_KERNEL);
+	if (!synx_objs)
+		return -ENOMEM;
+
+	if (copy_from_user(synx_objs,
+		u64_to_user_ptr(synx_merge_info.synx_objs),
+		size)) {
+		kfree(synx_objs);
+		return -EFAULT;
+	}
+
+	num_objs = synx_merge_info.num_objs;
+
+	result = synx_merge(synx_objs,
+		num_objs,
+		&synx_merge_info.merged);
+
+	if (!result)
+		if (copy_to_user(u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			&synx_merge_info,
+			k_ioctl->size)) {
+			kfree(synx_objs);
+			return -EFAULT;
+		}
+
+	kfree(synx_objs);
+
+	return result;
+}
+
+static int synx_handle_wait(struct synx_private_ioctl_arg *k_ioctl)
+{
+	struct synx_wait synx_wait_info;
+
+	if (k_ioctl->size != sizeof(synx_wait_info))
+		return -EINVAL;
+
+	if (copy_from_user(&synx_wait_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
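+	/* the wait status is returned through k_ioctl->result, not the ioctl rc */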
+	k_ioctl->result = synx_wait(synx_wait_info.synx_obj,
+		synx_wait_info.timeout_ms);
+
+	return 0;
+}
+
+static int synx_handle_register_user_payload(
+	struct synx_private_ioctl_arg *k_ioctl)
+{
+	u32 state = SYNX_STATE_INVALID;
+	struct synx_userpayload_info userpayload_info;
+	struct synx_cb_data *user_payload_kernel;
+	struct synx_cb_data *user_payload_iter, *temp;
+	struct synx_table_row *row = NULL;
+	struct synx_client *client = NULL;
+
+	pr_debug("Enter %s\n", __func__);
+
+	if (k_ioctl->size != sizeof(userpayload_info))
+		return -EINVAL;
+
+	if (copy_from_user(&userpayload_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	row = synx_from_handle(userpayload_info.synx_obj);
+	if (!row) {
+		pr_err("invalid synx: 0x%x\n", userpayload_info.synx_obj);
+		return -EINVAL;
+	}
+
+	mutex_lock(&synx_dev->table_lock);
+	client = get_current_client();
+	mutex_unlock(&synx_dev->table_lock);
+
+	if (!client) {
+		pr_err("couldn't find client for process %d\n", current->tgid);
+		return -EINVAL;
+	}
+
+	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
+	if (!user_payload_kernel)
+		return -ENOMEM;
+
+	user_payload_kernel->client = client;
+	user_payload_kernel->data.synx_obj = row->synx_obj;
+	memcpy(user_payload_kernel->data.payload_data,
+		userpayload_info.payload,
+		SYNX_PAYLOAD_WORDS * sizeof(__u64));
+
+	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	state = synx_status_locked(row);
+	if (state == SYNX_STATE_SIGNALED_SUCCESS ||
+		state == SYNX_STATE_SIGNALED_ERROR) {
+		user_payload_kernel->data.status = state;
+		spin_lock_bh(&client->eventq_lock);
+		list_add_tail(&user_payload_kernel->list, &client->eventq);
+		spin_unlock_bh(&client->eventq_lock);
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+		wake_up_all(&client->wq);
+		return 0;
+	}
+
+	list_for_each_entry_safe(user_payload_iter,
+		temp, &row->user_payload_list, list) {
+		if (user_payload_iter->data.payload_data[0] ==
+				user_payload_kernel->data.payload_data[0] &&
+			user_payload_iter->data.payload_data[1] ==
+				user_payload_kernel->data.payload_data[1]) {
+			pr_err("callback already registered on 0x%x\n",
+				row->synx_obj);
+			spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+			kfree(user_payload_kernel);
+			return -EALREADY;
+		}
+	}
+
+	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
+	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	pr_debug("Exit %s\n", __func__);
+	return 0;
+}
+
+static int synx_handle_deregister_user_payload(
+	struct synx_private_ioctl_arg *k_ioctl)
+{
+	u32 state = SYNX_STATE_INVALID;
+	struct synx_client *client = NULL;
+	struct synx_userpayload_info userpayload_info;
+	struct synx_cb_data *user_payload_kernel, *temp;
+	struct synx_table_row *row = NULL;
+	struct synx_user_payload *data = NULL;
+	u32 match_found = 0;
+
+	pr_debug("Enter %s\n", __func__);
+	if (k_ioctl->size != sizeof(userpayload_info))
+		return -EINVAL;
+
+	if (copy_from_user(&userpayload_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	row = synx_from_handle(userpayload_info.synx_obj);
+	if (!row) {
+		pr_err("invalid synx: 0x%x\n", userpayload_info.synx_obj);
+		return -EINVAL;
+	}
+
+	mutex_lock(&synx_dev->table_lock);
+	client = get_current_client();
+	mutex_unlock(&synx_dev->table_lock);
+
+	if (!client) {
+		pr_err("couldn't find client for process %d\n", current->tgid);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	state = synx_status_locked(row);
+	list_for_each_entry_safe(user_payload_kernel, temp,
+			&row->user_payload_list, list) {
+		if (user_payload_kernel->data.payload_data[0] ==
+				userpayload_info.payload[0] &&
+				user_payload_kernel->data.payload_data[1] ==
+				userpayload_info.payload[1]) {
+			list_del_init(&user_payload_kernel->list);
+			match_found = 1;
+			pr_debug("registered callback removed\n");
+			break;
+		}
+	}
+
+	spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+
+	if (match_found)
+		kfree(user_payload_kernel);
+
+	/* registration of cancellation cb */
+	if (userpayload_info.payload[2] != 0) {
+		user_payload_kernel = kzalloc(sizeof(
+							*user_payload_kernel),
+							GFP_KERNEL);
+		if (!user_payload_kernel)
+			return -ENOMEM;
+
+		data = &user_payload_kernel->data;
+		memcpy(data->payload_data,
+			userpayload_info.payload,
+			SYNX_PAYLOAD_WORDS * sizeof(__u64));
+
+		user_payload_kernel->client = client;
+		data->synx_obj = row->synx_obj;
+		data->status = SYNX_CALLBACK_RESULT_CANCELED;
+
+		spin_lock_bh(&client->eventq_lock);
+		list_add_tail(&user_payload_kernel->list, &client->eventq);
+		spin_unlock_bh(&client->eventq_lock);
+		pr_debug("registered cancellation callback\n");
+		wake_up_all(&client->wq);
+	}
+
+	pr_debug("Exit %s\n", __func__);
+	return 0;
+}
+
+static int synx_handle_bind(struct synx_private_ioctl_arg *k_ioctl)
+{
+	struct synx_bind synx_bind_info;
+
+	if (k_ioctl->size != sizeof(synx_bind_info))
+		return -EINVAL;
+
+	if (copy_from_user(&synx_bind_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	pr_debug("calling synx_bind: 0x%x, %d\n", synx_bind_info.synx_obj,
+		synx_bind_info.ext_sync_desc);
+	k_ioctl->result = synx_bind(synx_bind_info.synx_obj,
+		synx_bind_info.ext_sync_desc);
+
+	return k_ioctl->result;
+}
+
+static int synx_handle_addrefcount(struct synx_private_ioctl_arg *k_ioctl)
+{
+	struct synx_addrefcount addrefcount_info;
+
+	if (k_ioctl->size != sizeof(addrefcount_info))
+		return -EINVAL;
+
+	if (copy_from_user(&addrefcount_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	pr_debug("calling synx_addrefcount: 0x%x, %d\n",
+		addrefcount_info.synx_obj, addrefcount_info.count);
+	k_ioctl->result = synx_addrefcount(addrefcount_info.synx_obj,
+		addrefcount_info.count);
+
+	return k_ioctl->result;
+}
+
+static int synx_handle_release(struct synx_private_ioctl_arg *k_ioctl)
+{
+	struct synx_info info;
+
+	if (k_ioctl->size != sizeof(info))
+		return -EINVAL;
+
+	if (copy_from_user(&info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	return synx_release(info.synx_obj);
+}
+
+static struct synx_device *get_synx_device(struct file *filep)
+{
+	struct synx_client *client = filep->private_data;
+
+	return client->device;
+}
+
+static long synx_ioctl(struct file *filep,
+	unsigned int cmd,
+	unsigned long arg)
+{
+	s32 rc = 0;
+	struct synx_device *synx_dev = NULL;
+	struct synx_private_ioctl_arg k_ioctl;
+
+	pr_debug("Enter %s\n", __func__);
+
+	synx_dev = get_synx_device(filep);
+
+	if (cmd != SYNX_PRIVATE_IOCTL_CMD) {
+		pr_err("invalid ioctl cmd\n");
+		return -ENOIOCTLCMD;
+	}
+
+	if (copy_from_user(&k_ioctl,
+		(struct synx_private_ioctl_arg *)arg,
+		sizeof(k_ioctl))) {
+		pr_err("invalid ioctl args\n");
+		return -EFAULT;
+	}
+
+	if (!k_ioctl.ioctl_ptr)
+		return -EINVAL;
+
+	switch (k_ioctl.id) {
+	case SYNX_CREATE:
+		rc = synx_handle_create(&k_ioctl);
+		break;
+	case SYNX_RELEASE:
+		rc = synx_handle_release(&k_ioctl);
+		break;
+	case SYNX_REGISTER_PAYLOAD:
+		rc = synx_handle_register_user_payload(
+			&k_ioctl);
+		break;
+	case SYNX_DEREGISTER_PAYLOAD:
+		rc = synx_handle_deregister_user_payload(
+			&k_ioctl);
+		break;
+	case SYNX_SIGNAL:
+		rc = synx_handle_signal(&k_ioctl);
+		break;
+	case SYNX_MERGE:
+		rc = synx_handle_merge(&k_ioctl);
+		break;
+	case SYNX_WAIT:
+		rc = synx_handle_wait(&k_ioctl);
+		if (copy_to_user((void *)arg,
+			&k_ioctl,
+			sizeof(k_ioctl))) {
+			pr_err("invalid ioctl args\n");
+			rc = -EFAULT;
+		}
+		break;
+	case SYNX_BIND:
+		rc = synx_handle_bind(&k_ioctl);
+		break;
+	case SYNX_ADDREFCOUNT:
+		rc = synx_handle_addrefcount(&k_ioctl);
+		break;
+	case SYNX_GETSTATUS:
+		rc = synx_handle_getstatus(&k_ioctl);
+		break;
+	case SYNX_IMPORT:
+		rc = synx_handle_import(&k_ioctl);
+		break;
+	case SYNX_EXPORT:
+		rc = synx_handle_export(&k_ioctl);
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	pr_debug("Exit %s\n", __func__);
+	return rc;
+}
+
+static ssize_t synx_read(struct file *filep,
+	char __user *buf, size_t size, loff_t *f_pos)
+{
+	ssize_t rc = 0;
+	struct synx_client *client = NULL;
+	struct synx_cb_data *user_payload_kernel;
+
+	pr_debug("Enter %s\n", __func__);
+
+	client = filep->private_data;
+
+	if (size != sizeof(struct synx_user_payload)) {
+		pr_err("invalid read size\n");
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&client->eventq_lock);
+	user_payload_kernel = list_first_entry_or_null(
+							&client->eventq,
+							struct synx_cb_data,
+							list);
+	if (!user_payload_kernel) {
+		spin_unlock_bh(&client->eventq_lock);
+		return 0;
+	}
+	list_del_init(&user_payload_kernel->list);
+	spin_unlock_bh(&client->eventq_lock);
+
+	rc = size;
+	if (copy_to_user(buf,
+			&user_payload_kernel->data,
+			sizeof(struct synx_user_payload))) {
+		pr_err("couldn't copy user callback data\n");
+		rc = -EFAULT;
+	}
+	kfree(user_payload_kernel);
+
+	pr_debug("Exit %s\n", __func__);
+	return rc;
+}
+
+static unsigned int synx_poll(struct file *filep,
+	struct poll_table_struct *poll_table)
+{
+	int rc = 0;
+	struct synx_client *client = NULL;
+
+	pr_debug("Enter %s\n", __func__);
+
+	client = filep->private_data;
+
+	poll_wait(filep, &client->wq, poll_table);
+	spin_lock_bh(&client->eventq_lock);
+	/* if list has pending cb events, notify */
+	if (!list_empty(&client->eventq))
+		rc = POLLPRI;
+	spin_unlock_bh(&client->eventq_lock);
+
+	pr_debug("Exit %s\n", __func__);
+
+	return rc;
+}
+
+static int synx_open(struct inode *inode, struct file *filep)
+{
+	struct synx_device *synx_dev = NULL;
+	struct synx_client *client = NULL;
+
+	pr_debug("Enter %s from pid: %d\n", __func__, current->tgid);
+
+	synx_dev = container_of(inode->i_cdev, struct synx_device, cdev);
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+
+	client->device = synx_dev;
+	client->pid = current->tgid;
+	init_waitqueue_head(&client->wq);
+	INIT_LIST_HEAD(&client->eventq);
+	spin_lock_init(&client->eventq_lock);
+
+	mutex_lock(&synx_dev->table_lock);
+	list_add_tail(&client->list, &synx_dev->client_list);
+	synx_dev->open_cnt++;
+	mutex_unlock(&synx_dev->table_lock);
+
+	filep->private_data = client;
+
+	pr_debug("Exit %s\n", __func__);
+
+	return 0;
+}
+
+static int synx_close(struct file *filep, fl_owner_t id)
+{
+	int rc = 0;
+	int i;
+	struct synx_device *synx_dev = NULL;
+	struct synx_client *client, *tmp_client;
+
+	pr_debug("Enter %s\n", __func__);
+
+	synx_dev = get_synx_device(filep);
+
+	mutex_lock(&synx_dev->table_lock);
+
+	synx_dev->open_cnt--;
+	if (!synx_dev->open_cnt) {
+		for (i = 1; i < SYNX_MAX_OBJS; i++) {
+			struct synx_table_row *row =
+				synx_dev->synx_table + i;
+			/*
+			 * signal all ACTIVE objects as ERR, but we don't care
+			 * about the return status here apart from logging it.
+			 */
+			if (row->synx_obj && !is_merged_synx(row) &&
+				(synx_status(row) == SYNX_STATE_ACTIVE)) {
+				pr_debug("synx 0x%x still active at shutdown\n",
+					row->synx_obj);
+				rc = synx_signal(row->synx_obj,
+					SYNX_STATE_SIGNALED_ERROR);
+				if (rc < 0)
+					pr_err("cleanup signal fail idx:0x%x\n",
+						row->synx_obj);
+			}
+		}
+
+		/*
+		 * flush the work queue to wait for pending signal callbacks
+		 * to finish
+		 */
+		flush_workqueue(synx_dev->work_queue);
+
+		/*
+		 * now that all objs have been signaled,
+		 * destroy them
+		 */
+		for (i = 1; i < SYNX_MAX_OBJS; i++) {
+			struct synx_table_row *row =
+				synx_dev->synx_table + i;
+
+			if (row->synx_obj) {
+				rc = synx_release(row->synx_obj);
+				if (rc < 0) {
+					pr_err("cleanup destroy fail idx:0x%x\n",
+						row->synx_obj);
+				}
+			}
+		}
+	}
+
+	list_for_each_entry_safe(client, tmp_client,
+		&synx_dev->client_list, list) {
+		if (current->tgid == client->pid) {
+			pr_debug("deleting client for process %d\n",
+				client->pid);
+			list_del_init(&client->list);
+			kfree(client);
+			break;
+		}
+	}
+
+	mutex_unlock(&synx_dev->table_lock);
+
+	pr_debug("Exit %s\n", __func__);
+
+	return 0;
+}
+
+static const struct file_operations synx_fops = {
+	.owner = THIS_MODULE,
+	.open  = synx_open,
+	.read  = synx_read,
+	.flush = synx_close,
+	.poll  = synx_poll,
+	.unlocked_ioctl = synx_ioctl,
+};
+
+#ifdef CONFIG_SPECTRA_CAMERA
+static void synx_bind_ops_csl_type(struct bind_operations *vtbl)
+{
+	if (!vtbl)
+		return;
+
+	vtbl->register_callback = cam_sync_register_callback;
+	vtbl->deregister_callback = cam_sync_deregister_callback;
+	vtbl->signal = cam_sync_signal;
+
+	pr_debug("csl bind functionality set\n");
+}
+#else
+static void synx_bind_ops_csl_type(struct bind_operations *vtbl)
+{
+	pr_debug("csl bind functionality not available\n");
+}
+#endif
+
+static void synx_bind_ops_register(struct synx_device *synx_dev)
+{
+	u32 i;
+
+	for (i = 0; i < SYNX_MAX_BIND_TYPES; i++) {
+		switch (i) {
+		case SYNX_TYPE_CSL:
+			synx_bind_ops_csl_type(&synx_dev->bind_vtbl[i]);
+			break;
+		default:
+			pr_err("invalid external sync type\n");
+		}
+	}
+}
+
+static int __init synx_init(void)
+{
+	int rc;
+	int idx;
+
+	pr_info("synx device init start\n");
+
+	synx_dev = kzalloc(sizeof(*synx_dev), GFP_KERNEL);
+	if (!synx_dev)
+		return -ENOMEM;
+
+	mutex_init(&synx_dev->table_lock);
+
+	for (idx = 0; idx < SYNX_MAX_OBJS; idx++)
+		spin_lock_init(&synx_dev->row_spinlocks[idx]);
+
+	idr_init(&synx_dev->synx_ids);
+
+	rc = alloc_chrdev_region(&synx_dev->dev, 0, 1, SYNX_DEVICE_NAME);
+	if (rc < 0) {
+		pr_err("region allocation failed\n");
+		goto alloc_fail;
+	}
+
+	cdev_init(&synx_dev->cdev, &synx_fops);
+	synx_dev->cdev.owner = THIS_MODULE;
+	rc = cdev_add(&synx_dev->cdev, synx_dev->dev, 1);
+	if (rc < 0) {
+		pr_err("device registation failed\n");
+		goto reg_fail;
+	}
+
+	synx_dev->class = class_create(THIS_MODULE, SYNX_DEVICE_NAME);
+	device_create(synx_dev->class, NULL, synx_dev->dev,
+		NULL, SYNX_DEVICE_NAME);
+
+	/*
+	 * we treat zero as invalid handle, so we will keep the 0th bit set
+	 * always
+	 */
+	set_bit(0, synx_dev->bitmap);
+
+	synx_dev->work_queue = alloc_workqueue(SYNX_WORKQUEUE_NAME,
+		WQ_HIGHPRI | WQ_UNBOUND, 1);
+	if (!synx_dev->work_queue) {
+		pr_err("high priority work queue creation failed\n");
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_LIST_HEAD(&synx_dev->client_list);
+	synx_dev->dma_context = dma_fence_context_alloc(1);
+
+	synx_bind_ops_register(synx_dev);
+
+	pr_info("synx device init success\n");
+
+	return 0;
+
+fail:
+	device_destroy(synx_dev->class, synx_dev->dev);
+	class_destroy(synx_dev->class);
+reg_fail:
+	unregister_chrdev_region(synx_dev->dev, 1);
+alloc_fail:
+	mutex_destroy(&synx_dev->table_lock);
+	idr_destroy(&synx_dev->synx_ids);
+	kfree(synx_dev);
+	return rc;
+}
+
+device_initcall(synx_init);
+
+MODULE_DESCRIPTION("Global Synx Driver");
+MODULE_LICENSE("GPL v2");
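The ioctl surface above is easiest to see end to end from user space: every operation goes through the single SYNX_PRIVATE_IOCTL_CMD with a struct synx_private_ioctl_arg wrapper, and signal notifications arrive via poll()/read() on the same fd. The following is a hedged user-space sketch; the UAPI header path, the SYNX_* op codes, and the field names of struct synx_info and struct synx_user_payload are assumptions, since uapi/media/synx.h is not part of this diff.

	/* Hypothetical user-space sketch -- struct layouts assumed, not shown here. */
	#include <fcntl.h>
	#include <poll.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <media/synx.h>		/* assumed installed UAPI header */

	static int synx_op(int fd, uint32_t id, void *payload, uint32_t size)
	{
		struct synx_private_ioctl_arg arg;

		memset(&arg, 0, sizeof(arg));
		arg.id = id;
		arg.size = size;
		arg.ioctl_ptr = (uint64_t)(uintptr_t)payload;

		return ioctl(fd, SYNX_PRIVATE_IOCTL_CMD, &arg);
	}

	int main(void)
	{
		struct synx_info info;		/* assumed to carry the handle */
		struct synx_user_payload cb;	/* mirrors the kernel struct */
		struct pollfd pfd;
		int fd = open("/dev/synx_device", O_RDWR);

		if (fd < 0)
			return 1;

		memset(&info, 0, sizeof(info));
		if (synx_op(fd, SYNX_CREATE, &info, sizeof(info)) == 0) {
			/* wait for a registered callback payload to fire */
			pfd.fd = fd;
			pfd.events = POLLPRI;	/* synx_poll() reports POLLPRI */
			if (poll(&pfd, 1, 5000) > 0 &&
			    read(fd, &cb, sizeof(cb)) == (ssize_t)sizeof(cb))
				printf("synx 0x%x status %d\n",
				       cb.synx_obj, cb.status);

			synx_op(fd, SYNX_RELEASE, &info, sizeof(info));
		}

		close(fd);
		return 0;
	}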
diff --git a/drivers/media/platform/msm/synx/synx_api.h b/drivers/media/platform/msm/synx/synx_api.h
new file mode 100644
index 0000000..579f542
--- /dev/null
+++ b/drivers/media/platform/msm/synx/synx_api.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SYNX_API_H__
+#define __SYNX_API_H__
+
+#include <linux/list.h>
+#include <uapi/media/synx.h>
+
+typedef void (*synx_callback)(s32 sync_obj, int status, void *data);
+
+/* Kernel APIs */
+
+/**
+ * @brief: Creates a synx object
+ *
+ *  The newly created synx obj is assigned to synx_obj.
+ *
+ * @param synx_obj : Pointer to synx object handle (filled by the function)
+ * @param name     : Optional parameter associating a name with the synx
+ *                   object for debug purposes.
+ *                   Only the first SYNX_OBJ_NAME_LEN bytes are used,
+ *                   the rest are ignored.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if synx_obj is an invalid pointer.
+ * -ENOMEM will be returned if the kernel can't allocate space for
+ * synx object.
+ */
+int synx_create(s32 *synx_obj, const char *name);
+
+/**
+ * @brief: Registers a callback with a synx object
+ *
+ * @param synx_obj : int referencing the synx object.
+ * @param userdata : Opaque pointer passed back with callback.
+ * @param cb_func  : Pointer to callback to be registered
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if synx_obj or cb_func is invalid.
+ * -ENOMEM will be returned if the callback node cannot be allocated.
+ *
+ */
+int synx_register_callback(s32 synx_obj,
+	void *userdata, synx_callback cb_func);
+
+/**
+ * @brief: De-registers a callback with a synx object
+ *
+ * @param synx_obj       : int referencing the synx object.
+ * @param cb_func        : Pointer to callback to be de-registered
+ * @param userdata       : Opaque pointer passed back with callback.
+ * @param cancel_cb_func : Pointer to callback to ack de-registration
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if synx_obj or cb_func is invalid.
+ * -ENOMEM will be returned if the cancellation callback node
+ * cannot be allocated.
+ */
+int synx_deregister_callback(s32 synx_obj,
+	synx_callback cb_func,
+	void *userdata,
+	synx_callback cancel_cb_func);
+
+/**
+ * @brief: Signals a synx object with the status argument.
+ *
+ * This function will signal the synx object referenced by the synx_obj
+ * param and invoke any external binding synx objs.
+ * The status parameter will indicate whether the entity
+ * performing the signaling wants to convey an error case or a success case.
+ *
+ * @param synx_obj : Synx object handle
+ * @param status   : Status of signaling. Value : SYNX_STATE_SIGNALED_SUCCESS or
+ *                   SYNX_STATE_SIGNALED_ERROR.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int synx_signal(s32 synx_obj, u32 status);
+
+/**
+ * @brief: Merges multiple synx objects
+ *
+ * This function will merge multiple synx objects into a synx group.
+ *
+ * @param synx_objs  : Pointer to a block of synx handles to be merged
+ * @param num_objs   : Number of synx objs in the block
+ * @param merged_obj : Pointer to the merged synx object handle
+ *                     (filled by the function)
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int synx_merge(s32 *synx_objs, u32 num_objs, s32 *merged_obj);
+
+/**
+ * @brief: Waits for a synx object synchronously
+ *
+ * Does a wait on the synx object identified by synx_obj for a maximum
+ * of timeout_ms milliseconds. Must not be called from interrupt context as
+ * this API can sleep. Should be called from process context only.
+ *
+ * @param synx_obj      : Synx object handle to be waited upon
+ * @param timeout_ms : Timeout in ms.
+ *
+ * @return 0 upon success, -EINVAL if synx object is in bad state or arguments
+ * are invalid, -ETIMEDOUT if wait times out.
+ */
+int synx_wait(s32 synx_obj, u64 timeout_ms);
+
+/**
+ * @brief: Binds two synx objects
+ *
+ * Binding two synx objects unifies them: when one of them is signaled,
+ * the other is signaled as well.
+ *
+ * @param synx_obj      : Synx object handle
+ * @param external_sync : External synx descriptor to bind to object
+ *
+ * @return 0 upon success, -EINVAL if synx object is in bad state or
+ * arguments are invalid.
+ */
+int synx_bind(s32 synx_obj,
+	struct synx_external_desc external_sync);
+
+/**
+ * @brief: return the status of the synx object
+ *
+ * @param synx_obj : Synx object handle
+ *
+ * @return status of the synx object
+ */
+int synx_get_status(s32 synx_obj);
+
+/**
+ * @brief: Adds to the reference count of a synx object
+ *
+ * When a synx object is created, the refcount will be 1.
+ *
+ * @param synx_obj : Synx object handle
+ * @param count    : Count to add to the refcount
+ *
+ * @return 0 upon success, -EINVAL if synx object is in bad
+ *         state
+ */
+int synx_addrefcount(s32 synx_obj, s32 count);
+
+/**
+ * @brief: Imports (looks up) a synx object from a given ID
+ *
+ * The given ID should have been exported by another client
+ * and passed to this client.
+ *
+ * @param synx_obj     : Synx object handle to import
+ * @param secure_key   : Key to verify authenticity
+ * @param new_synx_obj : Pointer to newly created synx object
+ *
+ * @return 0 upon success, -EINVAL if synx object is bad
+ */
+int synx_import(s32 synx_obj, u32 secure_key, s32 *new_synx_obj);
+
+/**
+ * @brief: Exports a synx object and returns an ID
+ *
+ *  The given ID may be passed to other clients to be
+ *  imported.
+ *
+ * @param synx_obj   : Synx object handle to export
+ * @param secure_key : Pointer to the generated secure key
+ *
+ * @return 0 upon success, -EINVAL if the ID is bad
+ */
+int synx_export(s32 synx_obj, u32 *secure_key);
+
+/**
+ * @brief: Decrements the refcount of a synx object, and destroys it
+ *         when the refcount reaches zero
+ *
+ * @param synx_obj: Synx object handle to be destroyed
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int synx_release(s32 synx_obj);
+
+#endif /* __SYNX_API_H__ */
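Taken together, the API above follows a create/register/signal/wait/release lifecycle. A minimal in-kernel sketch of that flow, assuming process context (synx_wait can sleep) and a callback matching the synx_callback typedef; this is illustrative, not driver code:

	static void demo_synx_done(s32 synx_obj, int status, void *data)
	{
		pr_info("synx 0x%x completed, status %d\n", synx_obj, status);
	}

	static int demo_synx_flow(void)
	{
		s32 synx_obj;
		int rc;

		rc = synx_create(&synx_obj, "demo-synx");
		if (rc)
			return rc;

		rc = synx_register_callback(synx_obj, NULL, demo_synx_done);
		if (rc)
			goto out;

		/* a producer elsewhere would normally do the signaling */
		rc = synx_signal(synx_obj, SYNX_STATE_SIGNALED_SUCCESS);
		if (rc)
			goto out;

		/* consumers may also block; process context only */
		rc = synx_wait(synx_obj, 100);
	out:
		synx_release(synx_obj);
		return rc;
	}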
diff --git a/drivers/media/platform/msm/synx/synx_private.h b/drivers/media/platform/msm/synx/synx_private.h
new file mode 100644
index 0000000..36baec4
--- /dev/null
+++ b/drivers/media/platform/msm/synx/synx_private.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SYNX_PRIVATE_H__
+#define __SYNX_PRIVATE_H__
+
+#include <linux/bitmap.h>
+#include <linux/cdev.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
+#include <linux/idr.h>
+#include <linux/workqueue.h>
+
+#define SYNX_OBJ_NAME_LEN           64
+#define SYNX_MAX_OBJS               1024
+#define SYNX_MAX_REF_COUNTS         2048
+#define SYNX_PAYLOAD_WORDS          4
+#define SYNX_NAME                   "synx"
+#define SYNX_WORKQUEUE_NAME         "hiprio_synx_work_queue"
+#define SYNX_MAX_NUM_BINDINGS       8
+#define SYNX_DEVICE_NAME            "synx_device"
+
+/**
+ * struct synx_external_data - data passed over to external sync objects
+ * to pass on callback
+ *
+ * @synx_obj    : synx obj id
+ * @secure_key  : secure key for authentication
+ */
+struct synx_external_data {
+	s32 synx_obj;
+	u32 secure_key;
+};
+
+/**
+ * struct synx_bind_desc - bind payload descriptor
+ *
+ * @external_desc : external bind information
+ * @external_data : pointer to data passed over
+ */
+struct synx_bind_desc {
+	struct synx_external_desc external_desc;
+	struct synx_external_data *external_data;
+};
+
+/**
+ * struct synx_callback_info - Single node of information about a kernel
+ * callback registered on a sync object
+ *
+ * @callback_func    : Callback function, registered by client driver
+ * @cb_data          : Callback data, registered by client driver
+ * @status           : Status with which callback will be invoked in client
+ * @synx_obj         : Sync id of the object for which callback is registered
+ * @cb_dispatch_work : Work representing the call dispatch
+ * @list             : List member used to append this node to a linked list
+ */
+struct synx_callback_info {
+	synx_callback callback_func;
+	void *cb_data;
+	int status;
+	s32 synx_obj;
+	struct work_struct cb_dispatch_work;
+	struct list_head list;
+};
+
+struct synx_client;
+
+/**
+ * struct synx_user_payload - Single node of information about a callback
+ * registered from user space
+ *
+ * @synx_obj     : Global id
+ * @status       : synx obj status or callback failure
+ * @payload_data : Payload data, opaque to kernel
+ */
+struct synx_user_payload {
+	s32 synx_obj;
+	int status;
+	u64 payload_data[SYNX_PAYLOAD_WORDS];
+};
+
+/**
+ * struct synx_cb_data - Single node of information about a user space
+ * payload registered from user space
+ *
+ * @client : Synx client
+ * @data   : Payload data, opaque to kernel
+ * @list   : List member used to append this node to user cb list
+ */
+struct synx_cb_data {
+	struct synx_client *client;
+	struct synx_user_payload data;
+	struct list_head list;
+};
+
+/**
+ * struct synx_table_row - Single row of information about a synx object, used
+ * for internal book keeping in the synx driver
+ *
+ * @name              : Optional string representation of the synx object
+ * @fence             : dma fence backing the synx object
+ * @synx_obj          : Integer id representing this synx object
+ * @index             : Index of the spin lock table associated with synx obj
+ * @num_bound_synxs   : Number of external bound synx objects
+ * @signaling_id      : ID of the external sync object invoking the callback
+ * @secure_key        : Secure key generated for authentication
+ * @bound_synxs       : Array of bound synx objects
+ * @callback_list     : Linked list of kernel callbacks registered
+ * @user_payload_list : Linked list of user space payloads registered
+ */
+struct synx_table_row {
+	char name[SYNX_OBJ_NAME_LEN];
+	struct dma_fence *fence;
+	s32 synx_obj;
+	s32 index;
+	u32 num_bound_synxs;
+	s32 signaling_id;
+	u32 secure_key;
+	struct synx_bind_desc bound_synxs[SYNX_MAX_NUM_BINDINGS];
+	struct list_head callback_list;
+	struct list_head user_payload_list;
+};
+
+/**
+ * struct bind_operations - Function pointers that need to be defined
+ *    to achieve bind functionality for external fence with synx obj
+ *
+ * @register_callback   : Function to register with external sync object
+ * @deregister_callback : Function to deregister with external sync object
+ * @signal              : Function to signal the external sync object
+ */
+struct bind_operations {
+	int (*register_callback)(synx_callback cb_func,
+		void *userdata, s32 sync_obj);
+	int (*deregister_callback)(synx_callback cb_func,
+		void *userdata, s32 sync_obj);
+	int (*signal)(s32 sync_obj, u32 status);
+};
+
+/**
+ * struct synx_device - Internal struct to book keep synx driver details
+ *
+ * @cdev          : Character device
+ * @dev           : Device type
+ * @class         : Device class
+ * @synx_table    : Table of all synx objects
+ * @row_spinlocks : Spinlock array, one for each row in the table
+ * @table_lock    : Mutex used to lock the table
+ * @open_cnt      : Count of file open calls made on the synx driver
+ * @work_queue    : Work queue used for dispatching kernel callbacks
+ * @bitmap        : Bitmap representation of all synx objects
+ * @synx_ids      : Global unique ids
+ * @dma_context   : dma context id
+ * @bind_vtbl     : Table with bind ops for supported external sync objects
+ * @client_list   : All the synx clients
+ */
+struct synx_device {
+	struct cdev cdev;
+	dev_t dev;
+	struct class *class;
+	struct synx_table_row synx_table[SYNX_MAX_OBJS];
+	spinlock_t row_spinlocks[SYNX_MAX_OBJS];
+	struct mutex table_lock;
+	int open_cnt;
+	struct workqueue_struct *work_queue;
+	DECLARE_BITMAP(bitmap, SYNX_MAX_OBJS);
+	struct idr synx_ids;
+	u64 dma_context;
+	struct bind_operations bind_vtbl[SYNX_MAX_BIND_TYPES];
+	struct list_head client_list;
+};
+
+/**
+ * struct synx_client - Internal struct to book keep each client
+ * specific details
+ *
+ * @device      : Pointer to synx device structure
+ * @pid         : Process id
+ * @eventq_lock : Spinlock for the event queue
+ * @wq          : Queue for the polling process
+ * @eventq      : All the user callback payloads
+ * @list        : List member used to append this node to client_list
+ */
+struct synx_client {
+	struct synx_device *device;
+	int pid;
+	spinlock_t eventq_lock;
+	wait_queue_head_t wq;
+	struct list_head eventq;
+	struct list_head list;
+};
+
+#endif /* __SYNX_PRIVATE_H__ */
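struct bind_operations is the extension point for new external fence backends: synx_bind_ops_register() in synx.c fills one vtable slot per SYNX_TYPE_* at init. Below is a hedged sketch of what wiring up an additional backend could look like; the demo_* callbacks are hypothetical and stand in for a real external sync driver's entry points.

	/* Hypothetical backend wiring, for illustration only. */
	static int demo_register_callback(synx_callback cb_func,
		void *userdata, s32 sync_obj)
	{
		/* arrange for cb_func(sync_obj, status, userdata) on completion */
		return 0;
	}

	static int demo_deregister_callback(synx_callback cb_func,
		void *userdata, s32 sync_obj)
	{
		return 0;
	}

	static int demo_signal(s32 sync_obj, u32 status)
	{
		return 0;
	}

	static void synx_bind_ops_demo_type(struct bind_operations *vtbl)
	{
		if (!vtbl)
			return;

		vtbl->register_callback = demo_register_callback;
		vtbl->deregister_callback = demo_deregister_callback;
		vtbl->signal = demo_signal;
	}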
diff --git a/drivers/media/platform/msm/synx/synx_util.c b/drivers/media/platform/msm/synx/synx_util.c
new file mode 100644
index 0000000..c72fac9
--- /dev/null
+++ b/drivers/media/platform/msm/synx/synx_util.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#define pr_fmt(fmt) "synx: " fmt
+
+#include <linux/slab.h>
+#include <linux/random.h>
+
+#include "synx_api.h"
+#include "synx_util.h"
+
+bool is_valid_type(u32 type)
+{
+	return type < SYNX_MAX_BIND_TYPES;
+}
+
+int synx_init_object(struct synx_table_row *table,
+	u32 idx,
+	s32 id,
+	const char *name,
+	struct dma_fence_ops *ops)
+{
+	struct dma_fence *fence = NULL;
+	struct synx_table_row *row = NULL;
+
+	if (!table || !ops || idx <= 0 || idx >= SYNX_MAX_OBJS)
+		return -EINVAL;
+
+	row = table + idx;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return -ENOMEM;
+
+	dma_fence_init(fence, ops, &synx_dev->row_spinlocks[idx],
+		synx_dev->dma_context, 1);
+
+	row->fence = fence;
+	row->synx_obj = id;
+	row->index = idx;
+	INIT_LIST_HEAD(&row->callback_list);
+	INIT_LIST_HEAD(&row->user_payload_list);
+
+	if (name)
+		strlcpy(row->name, name, sizeof(row->name));
+
+	pr_debug("synx obj init: id:0x%x state:%u fence: 0x%pK\n",
+		row->synx_obj, synx_status_locked(row), fence);
+
+	return 0;
+}
+
+int synx_init_group_object(struct synx_table_row *table,
+	u32 idx,
+	s32 id,
+	struct dma_fence **fences,
+	u32 num_objs)
+{
+	struct synx_table_row *row = NULL;
+	struct dma_fence_array *array = NULL;
+
+	if (!table || idx <= 0 || idx >= SYNX_MAX_OBJS)
+		return -EINVAL;
+
+	row = table + idx;
+
+	array = dma_fence_array_create(num_objs,
+				fences, synx_dev->dma_context, 1, false);
+	if (!array)
+		return -EINVAL;
+
+	row->fence = &array->base;
+	row->synx_obj = id;
+	row->index = idx;
+	INIT_LIST_HEAD(&row->callback_list);
+	INIT_LIST_HEAD(&row->user_payload_list);
+
+	pr_debug("synx group obj init: id:0x%x state:%u fence: 0x%pK\n",
+		row->synx_obj, synx_status_locked(row), row->fence);
+
+	return 0;
+}
+
+void synx_callback_dispatch(struct synx_table_row *row)
+{
+	u32 state = SYNX_STATE_INVALID;
+	struct synx_client *client = NULL;
+	struct synx_callback_info *synx_cb, *temp_synx_cb;
+	struct synx_cb_data *payload_info, *temp_payload_info;
+
+	if (!row)
+		return;
+
+	state = synx_status_locked(row);
+
+	/* dispatch the kernel callbacks registered (if any) */
+	list_for_each_entry_safe(synx_cb,
+		temp_synx_cb, &row->callback_list, list) {
+		synx_cb->status = state;
+		list_del_init(&synx_cb->list);
+		queue_work(synx_dev->work_queue,
+			&synx_cb->cb_dispatch_work);
+		pr_debug("dispatched kernel cb\n");
+	}
+
+	/* add user payloads to eventq */
+	list_for_each_entry_safe(payload_info, temp_payload_info,
+		&row->user_payload_list, list) {
+		payload_info->data.status = state;
+		client = payload_info->client;
+		if (!client) {
+			pr_err("invalid client member in cb list\n");
+			continue;
+		}
+		spin_lock_bh(&client->eventq_lock);
+		list_move_tail(&payload_info->list, &client->eventq);
+		spin_unlock_bh(&client->eventq_lock);
+		/*
+		 * since cb can be registered by multiple clients,
+		 * wake the process right away
+		 */
+		wake_up_all(&client->wq);
+		pr_debug("dispatched user cb\n");
+	}
+}
+
+int synx_activate(struct synx_table_row *row)
+{
+	if (!row)
+		return -EINVAL;
+
+	/* move synx to ACTIVE state and register cb */
+	dma_fence_enable_sw_signaling(row->fence);
+
+	return 0;
+}
+
+int synx_deinit_object(struct synx_table_row *row)
+{
+	s32 synx_obj;
+	struct synx_callback_info *synx_cb, *temp_cb;
+	struct synx_cb_data *upayload_info, *temp_upayload;
+
+	if (!row)
+		return -EINVAL;
+
+	synx_obj = row->synx_obj;
+
+	if ((struct synx_table_row *)idr_replace(&synx_dev->synx_ids,
+			NULL, row->synx_obj) != row)
+		pr_err("replacing data in idr table failed\n");
+
+	/*
+	 * release the fence memory only for individual obj.
+	 * dma fence array will release all the allocated mem
+	 * in its registered release function.
+	 */
+	if (!is_merged_synx(row))
+		kfree(row->fence);
+
+	list_for_each_entry_safe(upayload_info, temp_upayload,
+			&row->user_payload_list, list) {
+		pr_err("pending user callback payload\n");
+		list_del_init(&upayload_info->list);
+		kfree(upayload_info);
+	}
+
+	list_for_each_entry_safe(synx_cb, temp_cb,
+			&row->callback_list, list) {
+		pr_err("pending kernel callback payload\n");
+		list_del_init(&synx_cb->list);
+		kfree(synx_cb);
+	}
+
+	clear_bit(row->index, synx_dev->bitmap);
+	memset(row, 0, sizeof(*row));
+
+	pr_debug("destroying synx obj: 0x%x successful\n", synx_obj);
+	return 0;
+}
+
+u32 synx_add_reference(struct dma_fence *fence)
+{
+	u32 count = 0;
+	u32 i = 0;
+	struct dma_fence_array *array = NULL;
+
+	/* obtain dma fence reference */
+	if (dma_fence_is_array(fence)) {
+		array = to_dma_fence_array(fence);
+		if (!array)
+			return 0;
+
+		for (i = 0; i < array->num_fences; i++)
+			dma_fence_get(array->fences[i]);
+
+		count = array->num_fences;
+	} else {
+		dma_fence_get(fence);
+		count = 1;
+	}
+
+	return count;
+}
+
+void synx_release_reference(struct dma_fence *fence)
+{
+	struct dma_fence_array *array = NULL;
+	u32 i = 0;
+
+	if (dma_fence_is_array(fence)) {
+		array = to_dma_fence_array(fence);
+		if (!array)
+			return;
+
+		for (i = 0; i < array->num_fences; i++)
+			dma_fence_put(array->fences[i]);
+	} else {
+		dma_fence_put(fence);
+	}
+}
+
+u32 synx_fence_add(struct dma_fence *fence,
+	struct dma_fence **fences,
+	u32 idx)
+{
+	struct dma_fence_array *array = NULL;
+	u32 i = 0;
+
+	if (dma_fence_is_array(fence)) {
+		array = to_dma_fence_array(fence);
+		if (!array)
+			return 0;
+
+		for (i = 0; i < array->num_fences; i++)
+			fences[idx+i] = array->fences[i];
+
+		return array->num_fences;
+	}
+
+	fences[idx] = fence;
+	return 1;
+}
+
+u32 synx_remove_duplicates(struct dma_fence **arr, u32 num)
+{
+	u32 i, j;
+	u32 wr_idx = 1;
+
+	if (!arr) {
+		pr_err("invalid input array\n");
+		return 0;
+	}
+
+	for (i = 1; i < num; i++) {
+		for (j = 0; j < wr_idx; j++) {
+			if (arr[i] == arr[j]) {
+				/* release reference obtained for duplicate */
+				dma_fence_put(arr[i]);
+				break;
+			}
+		}
+		if (j == wr_idx)
+			arr[wr_idx++] = arr[i];
+	}
+
+	return wr_idx;
+}
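The compaction above keeps the first occurrence of each fence, drops the dma_fence reference taken for every duplicate, and returns the new count. The same idea on plain integers, purely as an illustration of the in-place algorithm (callers guarantee at least two entries, as in the driver):

	/* Illustration only: {1, 2, 1, 3, 2} compacts to {1, 2, 3}, returns 3. */
	static u32 demo_remove_duplicates(int *arr, u32 num)
	{
		u32 i, j, wr_idx = 1;

		for (i = 1; i < num; i++) {
			for (j = 0; j < wr_idx; j++)
				if (arr[i] == arr[j])
					break;
			if (j == wr_idx)
				arr[wr_idx++] = arr[i];
		}

		return wr_idx;
	}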
+
+s32 synx_merge_error(s32 *synx_objs, u32 num_objs)
+{
+	struct synx_table_row *row = NULL;
+	u32 i = 0;
+
+	if (!synx_objs)
+		return -EINVAL;
+
+	for (i = 0; i < num_objs; i++) {
+		row = (struct synx_table_row *)synx_from_handle(synx_objs[i]);
+		if (!row) {
+			pr_err("invalid handle 0x%x\n", synx_objs[i]);
+			return -EINVAL;
+		}
+
+		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		synx_release_reference(row->fence);
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	}
+
+	return 0;
+}
+
+int synx_util_validate_merge(s32 *synx_objs,
+	u32 num_objs,
+	struct dma_fence ***fence_list,
+	u32 *fence_cnt)
+{
+	u32 count = 0;
+	u32 i = 0;
+	struct synx_table_row *row = NULL;
+	struct dma_fence **fences = NULL;
+
+	if (num_objs <= 1) {
+		pr_err("single object merge is not allowed\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_objs; i++) {
+		row = (struct synx_table_row *)synx_from_handle(synx_objs[i]);
+		if (!row) {
+			pr_err("invalid handle 0x%x\n", synx_objs[i]);
+			*fence_cnt = i;
+			return -EINVAL;
+		}
+
+		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		count += synx_add_reference(row->fence);
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	}
+
+	fences = kcalloc(count, sizeof(*fences), GFP_KERNEL);
+	if (!fences) {
+		*fence_cnt = num_objs;
+		return -ENOMEM;
+	}
+
+	*fence_list = fences;
+	count = 0;
+
+	for (i = 0; i < num_objs; i++) {
+		row = (struct synx_table_row *)synx_from_handle(synx_objs[i]);
+		if (!row) {
+			*fence_cnt = num_objs;
+			return -EINVAL;
+		}
+
+		spin_lock_bh(&synx_dev->row_spinlocks[row->index]);
+		count += synx_fence_add(row->fence, fences, count);
+		spin_unlock_bh(&synx_dev->row_spinlocks[row->index]);
+	}
+
+	/* eliminate duplicates */
+	*fence_cnt = synx_remove_duplicates(fences, count);
+	return 0;
+}
+
+void synx_util_cb_dispatch(struct work_struct *cb_dispatch_work)
+{
+	struct synx_callback_info *cb_info = container_of(cb_dispatch_work,
+		struct synx_callback_info,
+		cb_dispatch_work);
+
+	cb_info->callback_func(cb_info->synx_obj,
+		cb_info->status,
+		cb_info->cb_data);
+
+	kfree(cb_info);
+}
+
+bool is_merged_synx(struct synx_table_row *row)
+{
+	if (!row)
+		return false;
+
+	return dma_fence_is_array(row->fence);
+}
+
+u32 __fence_state(struct dma_fence *fence, bool locked)
+{
+	s32 status;
+	u32 state = SYNX_STATE_INVALID;
+
+	if (locked)
+		status = dma_fence_get_status_locked(fence);
+	else
+		status = dma_fence_get_status(fence);
+
+	/* convert fence status to synx state */
+	switch (status) {
+	case 0:
+		state = SYNX_STATE_ACTIVE;
+		break;
+	case 1:
+		state = SYNX_STATE_SIGNALED_SUCCESS;
+		break;
+	default:
+		state = SYNX_STATE_SIGNALED_ERROR;
+	}
+
+	return state;
+}
+
+u32 __fence_group_state(struct dma_fence *fence, bool locked)
+{
+	u32 i = 0;
+	u32 state = SYNX_STATE_INVALID;
+	struct dma_fence_array *array = to_dma_fence_array(fence);
+	u32 intr, actv_cnt, sig_cnt, err_cnt;
+
+	actv_cnt = sig_cnt = err_cnt = 0;
+
+	if (!array)
+		return SYNX_STATE_INVALID;
+
+	for (i = 0; i < array->num_fences; i++) {
+		intr = __fence_state(array->fences[i], locked);
+		switch (intr) {
+		case SYNX_STATE_ACTIVE:
+			actv_cnt++;
+			break;
+		case SYNX_STATE_SIGNALED_SUCCESS:
+			sig_cnt++;
+			break;
+		default:
+			err_cnt++;
+		}
+	}
+
+	pr_debug("group cnt stats act:%u, sig: %u, err: %u\n",
+		actv_cnt, sig_cnt, err_cnt);
+
+	if (err_cnt)
+		state = SYNX_STATE_SIGNALED_ERROR;
+	else if (actv_cnt)
+		state = SYNX_STATE_ACTIVE;
+	else if (sig_cnt == array->num_fences)
+		state = SYNX_STATE_SIGNALED_SUCCESS;
+
+	return state;
+}
+
+/*
+ * WARN: Should not hold the synx spinlock when invoking
+ * this function. Use synx_status_locked instead.
+ */
+u32 synx_status(struct synx_table_row *row)
+{
+	u32 state;
+
+	if (!row)
+		return SYNX_STATE_INVALID;
+
+	if (is_merged_synx(row))
+		state = __fence_group_state(row->fence, false);
+	else
+		state = __fence_state(row->fence, false);
+
+	return state;
+}
+
+/* use this for status check when holding on to metadata spinlock */
+u32 synx_status_locked(struct synx_table_row *row)
+{
+	u32 state;
+
+	if (!row)
+		return SYNX_STATE_INVALID;
+
+	if (is_merged_synx(row))
+		state = __fence_group_state(row->fence, true);
+	else
+		state = __fence_state(row->fence, true);
+
+	return state;
+}
+
+void *synx_from_handle(s32 synx_obj)
+{
+	s32 base;
+	struct synx_table_row *row =
+		(struct synx_table_row *) idr_find(&synx_dev->synx_ids,
+		synx_obj);
+
+	if (!row) {
+		pr_err("synx handle does not exist 0x%x\n", synx_obj);
+		return NULL;
+	}
+
+	base = current->tgid << 16;
+
+	if ((base >> 16) != (synx_obj >> 16)) {
+		pr_err("current client: %d, base: %d, synx_obj: 0x%x\n",
+			current->tgid, base, synx_obj);
+		return NULL;
+	}
+
+	return row;
+}
+
+s32 synx_create_handle(void *pObj)
+{
+	s32 base = current->tgid << 16;
+	s32 id = idr_alloc(&synx_dev->synx_ids, pObj,
+					base, base + 0x10000, GFP_ATOMIC);
+
+	pr_debug("generated Id: 0x%x, base: 0x%x, client: 0x%x\n",
+		id, base, current->tgid);
+	return id;
+}
+
+struct synx_client *get_current_client(void)
+{
+	struct synx_client *client = NULL, *curr;
+
+	list_for_each_entry(curr, &synx_dev->client_list, list) {
+		if (current->tgid == curr->pid) {
+			client = curr;
+			break;
+		}
+	}
+
+	return client;
+}
+
+int synx_generate_secure_key(struct synx_table_row *row)
+{
+	if (!row)
+		return -EINVAL;
+
+	get_random_bytes(&row->secure_key, sizeof(row->secure_key));
+	return row->secure_key;
+}
+
+struct synx_table_row *synx_from_fence(struct dma_fence *fence)
+{
+	s32 idx = 0;
+	struct synx_table_row *row = NULL;
+	struct synx_table_row *table = synx_dev->synx_table;
+
+	if (!fence)
+		return NULL;
+
+	for (idx = 0; idx < SYNX_MAX_OBJS; idx++) {
+		if (table[idx].fence == fence) {
+			row = table + idx;
+			pr_debug("synx global data found for 0x%x\n",
+				row->synx_obj);
+			break;
+		}
+	}
+
+	return row;
+}
+
+void *synx_from_key(s32 id, u32 secure_key)
+{
+	struct synx_table_row *row = NULL;
+
+	row = (struct synx_table_row *) idr_find(&synx_dev->synx_ids, id);
+	if (!row) {
+		pr_err("invalid synx obj 0x%x\n", id);
+		return NULL;
+	}
+
+	if (row->secure_key != secure_key)
+		row = NULL;
+
+	return row;
+}
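A note on the handle layout that synx_create_handle() and synx_from_handle() agree on: the caller's tgid sits in the upper 16 bits of the handle and the idr-allocated slot in the lower 16, so ownership can be checked with a single shift. Illustrative arithmetic only (and, as the shift implies, tgids that do not fit in 16 bits would fall outside this scheme):

	/*
	 * Illustration: tgid 1234, idr slot 7 -> handle
	 * (1234 << 16) | 7 == 0x04D20007; handle >> 16 recovers 1234.
	 */
	static bool demo_owns_handle(pid_t tgid, s32 handle)
	{
		return (handle >> 16) == tgid;
	}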
diff --git a/drivers/media/platform/msm/synx/synx_util.h b/drivers/media/platform/msm/synx/synx_util.h
new file mode 100644
index 0000000..e9d3173
--- /dev/null
+++ b/drivers/media/platform/msm/synx/synx_util.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SYNX_UTIL_H__
+#define __SYNX_UTIL_H__
+
+#include "synx_private.h"
+
+extern struct synx_device *synx_dev;
+
+/**
+ * @brief: Function to check if the external sync obj is valid
+ *
+ * @param type : External sync object type
+ *
+ * @return True if valid. False otherwise
+ */
+bool is_valid_type(u32 type);
+
+/**
+ * @brief: Function to initialize an empty row in the synx table.
+ *         It also initializes dma fence.
+ *         This should be called only for individual objects.
+ *
+ * @param table : Pointer to the synx objects table
+ * @param idx   : Index of row to initialize
+ * @param id    : Id associated with the object
+ * @param name  : Optional string representation of the synx object. Should be
+ *                63 characters or less
+ * @param ops   : dma fence ops required for fence initialization
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int synx_init_object(struct synx_table_row *table,
+	u32 idx,
+	s32 id,
+	const char *name,
+	struct dma_fence_ops *ops);
+
+/**
+ * @brief: Function to uninitialize a row in the synx table.
+ *
+ * @param row : Pointer to the synx object row
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int synx_deinit_object(struct synx_table_row *row);
+
+/**
+ * @brief: Function to initialize a row in the synx table when the object is a
+ *         a merged synx object. It also initializes dma fence array.
+ *
+ * @param table    : Pointer to the synx objects table
+ * @param idx      : Index of row to initialize
+ * @param id       : Id associated with the object
+ * @param fences   : Array of fence objects which will be merged
+ *                   or grouped together to a fence array
+ * @param num_objs : Number of fence objects in the array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int synx_init_group_object(struct synx_table_row *table,
+	u32 idx,
+	s32 id,
+	struct dma_fence **fences,
+	u32 num_objs);
+
+/**
+ * @brief: Function to activate the synx object. Moves the synx from INVALID
+ *         state to ACTIVE state.
+ *
+ * @param row : Pointer to the synx object row
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int synx_activate(struct synx_table_row *row);
+
+/**
+ * @brief: Function to dispatch callbacks registered with
+ *         the synx object.
+ *
+ * @param row : Pointer to the synx object row
+ *
+ * @return None
+ */
+void synx_callback_dispatch(struct synx_table_row *row);
+
+/**
+ * @brief: Function to handle error during group synx obj initialization.
+ *         Removes the references added on the fence objects.
+ *
+ * @param synx_objs : Array of synx objects to merge
+ * @param num_objs  : Number of synx objects in the array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+s32 synx_merge_error(s32 *synx_objs, u32 num_objs);
+
+/**
+ * @brief: Function to validate synx merge arguments. It obtains the necessary
+ *         references to the fence objects and also removes duplicates (if any).
+ *
+ * @param synx_objs : Array of synx objects to merge
+ * @param num_objs  : Number of synx objects in the array
+ * @param fences    : Address at which the allocated dma fence array
+ *                    is returned
+ * @param fence_cnt : Number of fence objects in the fences array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int synx_util_validate_merge(s32 *synx_objs,
+	u32 num_objs,
+	struct dma_fence ***fences,
+	u32 *fence_cnt);
+
+/**
+ * @brief: Function to dispatch a kernel callback for a sync callback
+ *
+ * @param cb_dispatch_work : Pointer to the work_struct that needs to be
+ *                           dispatched
+ *
+ * @return None
+ */
+void synx_util_cb_dispatch(struct work_struct *cb_dispatch_work);
+
+/**
+ * @brief: Function to check if the synx is a merged (grouped) object
+ *
+ * @param row : Pointer to the synx object row
+ *
+ * @return True if merged object. Otherwise false.
+ */
+bool is_merged_synx(struct synx_table_row *row);
+
+/**
+ * @brief: Function to check the state of synx object.
+ *         The row lock associated with the synx obj should not be
+ *         held when invoking this function.
+ *         Use synx_status_locked for state enquiry when holding lock.
+ *
+ * @param row : Pointer to the synx object row
+ *
+ * @return Status of the synx object.
+ */
+u32 synx_status(struct synx_table_row *row);
+
+/**
+ * @brief: Function to check the state of synx object.
+ *         The row lock associated with the synx obj should be
+ *         held when invoking this function.
+ *         Use synx_status for state enquiry when not holding lock.
+ *
+ * @param row : Pointer to the synx object row
+ *
+ * @return Status of the synx object.
+ */
+u32 synx_status_locked(struct synx_table_row *row);
+
+/**
+ * @brief: Function to return the current client (active process)
+ *
+ * @return The current client
+ */
+struct synx_client *get_current_client(void);
+
+/**
+ * @brief: Function to look up a synx handle
+ *         It also verifies the authenticity of the request through
+ *         the key provided.
+ *
+ * @param synx_id    : Synx handle
+ * @param secure_key : Key to verify authenticity
+ *
+ * @return The synx table entry corresponding to the given synx ID
+ */
+void *synx_from_key(s32 synx_id, u32 secure_key);
+
+/**
+ * @brief: Function to look up a synx handle using the backing dma fence
+ *
+ * @param fence : dma fence backing the synx object
+ *
+ * @return The synx table entry corresponding to the given dma fence.
+ * NULL otherwise.
+ */
+struct synx_table_row *synx_from_fence(struct dma_fence *fence);
+
+/**
+ * @brief: Function to look up a synx handle
+ *
+ * @param synx_id : Synx handle
+ *
+ * @return The synx corresponding to the given handle or NULL if
+ *         handle is invalid (or not permitted).
+ */
+void *synx_from_handle(s32 synx_id);
+
+/**
+ * @brief: Function to create a new synx handle
+ *
+ * @param pObj : Object to be associated with the created handle
+ *
+ * @return The created handle
+ */
+s32 synx_create_handle(void *pObj);
+
+/**
+ * @brief: Function to generate a secure key for authentication.
+ *         Used to verify requests on synx objects not owned
+ *         by the calling process.
+ *
+ * @param row : Pointer to the synx object row
+ *
+ * @return The generated secure key
+ */
+int synx_generate_secure_key(struct synx_table_row *row);
+
+#endif /* __SYNX_UTIL_H__ */
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 615beab..1c17657 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/bitops.h>
@@ -698,6 +698,10 @@
 		out->capability_type = get_hal_cap_type(in->capability_type);
 		out->min = in->min;
 		out->max = in->max;
+		if (in->capability_type == HFI_CAPABILITY_I_FRAME_QP ||
+			in->capability_type == HFI_CAPABILITY_P_FRAME_QP ||
+			in->capability_type == HFI_CAPABILITY_B_FRAME_QP)
+			++out->max;
 		out->step_size = in->step_size;
 	}
 }
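The max bump for the QP capabilities pairs with the encoder-side validation added below, where maximum behaves as an exclusive bound: a value equal to the (incremented) max acts as the INVALID_QP "unset" sentinel rather than a real QP. A small illustration of that convention, using assumed numbers:

	/* Illustration: a firmware range of [0, 51] becomes [0, 52) after ++max. */
	static bool demo_qp_is_set(int qp, int cap_min, int cap_max)
	{
		/* mirrors the ctrl checks: val < min or val >= max means unset */
		return qp >= cap_min && qp < cap_max;
	}
	/* demo_qp_is_set(51, 0, 52) -> true; demo_qp_is_set(52, 0, 52) -> false */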
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 71fada2..47d5d199 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -24,6 +24,7 @@
 #define QP_ENABLE_I 0x1
 #define QP_ENABLE_P 0x2
 #define QP_ENABLE_B 0x4
+#define INVALID_QP -1
 #define MAX_INTRA_REFRESH_MBS ((7680 * 4320) >> 8)
 #define MAX_LTR_FRAME_COUNT 10
 #define MAX_NUM_B_FRAMES 1
@@ -107,9 +108,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP,
 		.name = "HEVC I Frame Quantization",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 0,
-		.maximum = 127,
-		.default_value = 0,
+		.minimum = INVALID_QP,
+		.maximum = INVALID_QP,
+		.default_value = INVALID_QP,
 		.step = 1,
 		.menu_skip_mask = 0,
 		.qmenu = NULL,
@@ -118,9 +119,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP,
 		.name = "HEVC P Frame Quantization",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 0,
-		.maximum = 127,
-		.default_value = 0,
+		.minimum = INVALID_QP,
+		.maximum = INVALID_QP,
+		.default_value = INVALID_QP,
 		.step = 1,
 		.menu_skip_mask = 0,
 		.qmenu = NULL,
@@ -129,9 +130,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP,
 		.name = "HEVC B Frame Quantization",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 0,
-		.maximum = 127,
-		.default_value = 0,
+		.minimum = INVALID_QP,
+		.maximum = INVALID_QP,
+		.default_value = INVALID_QP,
 		.step = 1,
 		.menu_skip_mask = 0,
 		.qmenu = NULL,
@@ -140,9 +141,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
 		.name = "HEVC Quantization Range Minimum",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 0,
-		.maximum = 0x7F7F7F,
-		.default_value = 0x010101,
+		.minimum = INVALID_QP,
+		.maximum = INVALID_QP,
+		.default_value = INVALID_QP,
 		.step = 1,
 		.menu_skip_mask = 0,
 		.qmenu = NULL,
@@ -151,9 +152,9 @@
 		.id = V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP,
 		.name = "HEVC Quantization Range Maximum",
 		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 0,
-		.maximum = 0x7F7F7F,
-		.default_value = 0x7F7F7F,
+		.minimum = INVALID_QP,
+		.maximum = INVALID_QP,
+		.default_value = INVALID_QP,
 		.step = 1,
 		.menu_skip_mask = 0,
 		.qmenu = NULL,
@@ -1469,6 +1470,9 @@
 	struct msm_vidc_mastering_display_colour_sei_payload *mdisp_sei = NULL;
 	struct msm_vidc_content_light_level_sei_payload *cll_sei = NULL;
 	struct hal_buffer_requirements *buff_req_buffer = NULL;
+	struct v4l2_ctrl *i_qp = NULL;
+	struct v4l2_ctrl *p_qp = NULL;
+	struct v4l2_ctrl *b_qp = NULL;
 
 	if (!inst || !inst->core || !inst->core->device || !ctrl) {
 		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
@@ -1575,15 +1579,6 @@
 					__func__);
 		}
 		break;
-	case V4L2_CID_MPEG_VIDC_VIDEO_DYN_QP:
-		if (inst->state == MSM_VIDC_START_DONE) {
-			rc = msm_venc_set_dyn_qp(inst);
-			if (rc)
-				dprintk(VIDC_ERR,
-					"%s: ltr markframe failed\n",
-					__func__);
-		}
-		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
 		if (((ctrl->val >> 16) < inst->capability.frame_rate.min ||
 			(ctrl->val >> 16) > inst->capability.frame_rate.max) &&
@@ -1728,6 +1723,58 @@
 		inst->level |=
 			(msm_comm_v4l2_to_hfi(ctrl->id, ctrl->val) << 28);
 		break;
+	case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
+	case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
+		i_qp = msm_venc_get_ctrl(inst,
+			V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP);
+		if (!i_qp) {
+			dprintk(VIDC_ERR, "%s: get I QP failed\n", __func__);
+			return -EINVAL;
+		}
+		p_qp = msm_venc_get_ctrl(inst,
+			V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP);
+		if (!p_qp) {
+			dprintk(VIDC_ERR, "%s: get P QP failed\n", __func__);
+			return -EINVAL;
+		}
+		b_qp = msm_venc_get_ctrl(inst,
+			V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP);
+		if (!b_qp) {
+			dprintk(VIDC_ERR, "%s: get B QP failed\n", __func__);
+			return -EINVAL;
+		}
+		if ((ctrl->val & 0xff) < i_qp->minimum ||
+			((ctrl->val >> 8) & 0xff) < p_qp->minimum ||
+			((ctrl->val >> 16) & 0xff) < b_qp->minimum ||
+			(ctrl->val & 0xff) >= i_qp->maximum ||
+			((ctrl->val >> 8) & 0xff) >= p_qp->maximum ||
+			(inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_VP8 &&
+			((ctrl->val >> 16) & 0xff) >= b_qp->maximum)) {
+			dprintk(VIDC_ERR, "Invalid QP %#x\n", ctrl->val);
+			return -EINVAL;
+		}
+		break;
+	case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
+		i_qp = msm_venc_get_ctrl(inst,
+			V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP);
+		if (!i_qp) {
+			dprintk(VIDC_ERR, "%s: get I QP failed\n", __func__);
+			return -EINVAL;
+		}
+		if ((ctrl->val & 0xff) >= i_qp->maximum) {
+			dprintk(VIDC_ERR, "Invalid QP %#x\n", ctrl->val);
+			return -EINVAL;
+		}
+		if (inst->state == MSM_VIDC_START_DONE) {
+			rc = msm_venc_set_dyn_qp(inst, ctrl);
+			if (rc)
+				dprintk(VIDC_ERR,
+					"%s: setting dyn frame QP failed\n",
+					__func__);
+		}
+		break;
+	case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
+	case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
 	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
 	case V4L2_CID_ROTATE:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER:
@@ -1754,11 +1801,6 @@
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR:
 	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR:
-	case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
-	case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
-	case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
-	case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
-	case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
 	case V4L2_CID_MPEG_VIDC_VIDEO_COLOR_SPACE:
 	case V4L2_CID_MPEG_VIDC_VIDEO_FULL_RANGE:
 	case V4L2_CID_MPEG_VIDC_VIDEO_TRANSFER_CHARS:
@@ -1772,7 +1814,7 @@
 	case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT:
 	case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_DIMENSIONS:
 	case V4L2_CID_MPEG_VIDC_VIDEO_PRIORITY:
-			dprintk(VIDC_DBG, "Control set: ID : %x Val : %d\n",
+		dprintk(VIDC_DBG, "Control set: ID : %x Val : %d\n",
 			ctrl->id, ctrl->val);
 		break;
 	default:
@@ -2307,9 +2349,10 @@
 {
 	int rc = 0;
 	struct hfi_device *hdev;
-	struct v4l2_ctrl *i_qp;
-	struct v4l2_ctrl *p_qp;
-	struct v4l2_ctrl *b_qp;
+	struct v4l2_ctrl *i_qp = NULL;
+	struct v4l2_ctrl *p_qp = NULL;
+	struct v4l2_ctrl *b_qp = NULL;
+	struct v4l2_ctrl *rc_enable = NULL;
 	struct hfi_quantization qp;
 
 	if (!inst || !inst->core) {
@@ -2319,6 +2362,7 @@
 	hdev = inst->core->device;
 
 	qp.layer_id = MSM_VIDC_ALL_LAYER_ID;
+	qp.enable = QP_ENABLE_I | QP_ENABLE_P | QP_ENABLE_B;
 
 	i_qp = msm_venc_get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP);
 	if (!i_qp) {
@@ -2338,9 +2382,47 @@
 		return -EINVAL;
 	}
 
-	/* This should happen based on which controls are set */
+	rc_enable = msm_venc_get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE);
+	if (!rc_enable) {
+		dprintk(VIDC_ERR, "%s: get rc enable failed\n", __func__);
+		return -EINVAL;
+	}
+
+	if (rc_enable->val) {
+		if (i_qp->val >= i_qp->default_value ||
+			i_qp->val < i_qp->minimum)
+			qp.enable &= (!QP_ENABLE_I);
+		if (p_qp->val >= p_qp->default_value ||
+			p_qp->val < p_qp->minimum)
+			qp.enable &= (!QP_ENABLE_P);
+		if (b_qp->val >= b_qp->default_value ||
+			b_qp->val < b_qp->minimum)
+			qp.enable &= (!QP_ENABLE_B);
+
+		if (!(qp.enable & 0x7))
+			return 0;
+	} else {
+		if (i_qp->val >= i_qp->default_value ||
+			i_qp->val < i_qp->minimum) {
+			dprintk(VIDC_WARN,
+				"%s: Client value is not valid\n", __func__);
+			return -EINVAL;
+		}
+		if (p_qp->val >= p_qp->default_value ||
+			p_qp->val < p_qp->minimum)
+			p_qp->val = i_qp->val;
+		if (b_qp->val >= b_qp->default_value ||
+			b_qp->val < b_qp->minimum)
+			b_qp->val = i_qp->val;
+	}
+
+	/* B frame QP is not supported for VP8. */
+	if (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_VP8)
+		qp.enable &= (!QP_ENABLE_B);
+
 	qp.qp_packed = i_qp->val | p_qp->val << 8 | b_qp->val << 16;
-	qp.enable = QP_ENABLE_I | QP_ENABLE_P | QP_ENABLE_B;
+
 	dprintk(VIDC_DBG, "%s: layers %#x frames %#x qp_packed %#x\n",
 		__func__, qp.layer_id, qp.enable, qp.qp_packed);
 	rc = call_hfi_op(hdev, session_set_property, inst->session,
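For reference, qp_packed in both paths above packs the three frame-type QPs into one word: I frame in bits 7:0, P frame in bits 15:8, B frame in bits 23:16. A worked example of the packing (illustrative values):

	/* Illustration only: demo_pack_qp(10, 12, 14) == 0x000E0C0A. */
	static u32 demo_pack_qp(u32 i_qp, u32 p_qp, u32 b_qp)
	{
		return i_qp | p_qp << 8 | b_qp << 16;
	}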
@@ -3266,12 +3348,12 @@
 	return rc;
 }
 
-int msm_venc_set_dyn_qp(struct msm_vidc_inst *inst)
+int msm_venc_set_dyn_qp(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
 {
 	int rc = 0;
 	struct hfi_device *hdev;
-	struct v4l2_ctrl *ctrl;
-	struct hfi_quantization quant;
+	struct hfi_quantization qp;
+	struct v4l2_ctrl *rc_enable;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
@@ -3279,21 +3361,27 @@
 	}
 	hdev = inst->core->device;
 
-	ctrl = msm_venc_get_ctrl(inst,
-			V4L2_CID_MPEG_VIDC_VIDEO_DYN_QP);
-	if (!ctrl) {
-		dprintk(VIDC_ERR, "%s: get dyn qp ctrl failed\n", __func__);
+	rc_enable = msm_venc_get_ctrl(inst,
+		V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE);
+	if (!rc_enable) {
+		dprintk(VIDC_ERR, "%s: get rc enable failed\n", __func__);
 		return -EINVAL;
 	}
 
-	quant.qp_packed = ctrl->val | ctrl->val << 8 | ctrl->val << 16;
-	quant.enable = QP_ENABLE_I | QP_ENABLE_P | QP_ENABLE_B;
-	quant.layer_id = MSM_VIDC_ALL_LAYER_ID;
+	if (rc_enable->val) {
+		dprintk(VIDC_ERR, "%s: Dyn qp is set only when RC is OFF\n",
+			__func__);
+		return -EINVAL;
+	}
 
-	dprintk(VIDC_DBG, "%s: %d\n", __func__,
-			ctrl->val);
+	qp.qp_packed = ctrl->val | ctrl->val << 8 | ctrl->val << 16;
+	qp.enable = QP_ENABLE_I | QP_ENABLE_P | QP_ENABLE_B;
+	qp.layer_id = MSM_VIDC_ALL_LAYER_ID;
+
+	dprintk(VIDC_DBG, "%s: %#x\n", __func__,
+		ctrl->val);
 	rc = call_hfi_op(hdev, session_set_property, inst->session,
-		HFI_PROPERTY_CONFIG_VENC_FRAME_QP, &quant, sizeof(quant));
+		HFI_PROPERTY_CONFIG_VENC_FRAME_QP, &qp, sizeof(qp));
 	if (rc)
 		dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
 
diff --git a/drivers/media/platform/msm/vidc/msm_venc.h b/drivers/media/platform/msm/vidc/msm_venc.h
index b6fa2a2..fef651e 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.h
+++ b/drivers/media/platform/msm/vidc/msm_venc.h
@@ -27,7 +27,7 @@
 int msm_venc_set_intra_period(struct msm_vidc_inst *inst);
 int msm_venc_set_ltr_useframe(struct msm_vidc_inst *inst);
 int msm_venc_set_ltr_markframe(struct msm_vidc_inst *inst);
-int msm_venc_set_dyn_qp(struct msm_vidc_inst *inst);
+int msm_venc_set_dyn_qp(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl);
 int msm_venc_set_request_keyframe(struct msm_vidc_inst *inst);
 int msm_venc_set_intra_refresh_mode(struct msm_vidc_inst *inst);
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 95e37d3..8d01533 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1312,7 +1312,7 @@
 	if (ctrl) {
 		v4l2_ctrl_modify_range(ctrl, capability->min,
 				capability->max, ctrl->step,
-				ctrl->default_value);
+				capability->max);
 		dprintk(VIDC_DBG,
 			"%s: Updated Range = %lld --> %lld Def value = %lld\n",
 			ctrl->name, ctrl->minimum, ctrl->maximum,
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index ec21388..6567748 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -12,6 +12,16 @@
 	  If you want MMC/SD/SDIO support, you should say Y here and
 	  also to your specific host controller driver.
 
+config MMC_PERF_PROFILING
+	bool "MMC performance profiling"
+	depends on MMC != n
+	default n
+	help
+	  This enables support for collecting performance numbers for
+	  MMC at the queue and host layers.
+
+	  If you want to collect MMC performance numbers, say Y here.
+
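As a rough sketch of what the collected numbers look like to a consumer (the
field names mirror the host->perf counters added to core.c further down; the
throughput arithmetic is only an assumed post-processing step, not part of
this patch):

    #include <stdint.h>
    #include <stdio.h>

    struct mmc_perf_sample {
        uint64_t rbytes_drv, wbytes_drv;   /* bytes moved at the driver level */
        uint64_t rtime_us, wtime_us;       /* accumulated transfer time, usec */
    };

    /* bytes per microsecond is numerically MB/s */
    static double throughput_mb_s(uint64_t bytes, uint64_t us)
    {
        return us ? (double)bytes / (double)us : 0.0;
    }

    int main(void)
    {
        /* made-up sample: 512 MiB read in 4 s, 256 MiB written in 6 s */
        struct mmc_perf_sample s = { 512ULL << 20, 256ULL << 20, 4000000, 6000000 };

        printf("read: %.1f MB/s, write: %.1f MB/s\n",
               throughput_mb_s(s.rbytes_drv, s.rtime_us),
               throughput_mb_s(s.wbytes_drv, s.wtime_us));
        return 0;
    }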
 if MMC
 
 source "drivers/mmc/core/Kconfig"
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 42e8906..7fda440 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -61,6 +61,17 @@
 
 	  If unsure, say 8 here.
 
+config MMC_BLOCK_DEFERRED_RESUME
+	bool "Defer MMC layer resume until I/O is requested"
+	depends on MMC_BLOCK
+	default n
+	help
+	  Say Y here to enable deferred MMC resume until I/O
+	  is requested.
+
+	  This will reduce overall resume latency and
+	  save power when there is an SD card inserted but not being used.
+
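At runtime this option works through a check-and-resume pattern; the
mmc_wait_for_req() hunk in core.c later in this patch is the consumer side
(kernel context, not buildable standalone):

    #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
    	/* The card was left suspended across resume; wake it only now
    	 * that an actual request has arrived. */
    	if (mmc_bus_needs_resume(host))
    		mmc_resume_bus(host);
    #endif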
 config SDIO_UART
 	tristate "SDIO UART/GPS class support"
 	depends on TTY
@@ -80,3 +91,23 @@
 	  This driver is only of interest to those developing or
 	  testing a host driver. Most people should say N here.
 
+config MMC_RING_BUFFER
+	bool "MMC ring buffer tracing"
+	depends on MMC
+	default n
+	help
+	  This enables ring buffer tracing of significant MMC driver
+	  events to provide a command history for debugging
+	  purposes.
+
+	  If unsure, say N.
+
+config MMC_CLKGATE
+	bool "MMC host clock gating"
+	help
+	  This will attempt to aggressively gate the clock to the MMC card.
+	  This saves power by gating off the logic and bus noise
+	  when the MMC card is not in use. Your host driver has to
+	  support handling this in order for it to be of any use.
+
+	  If unsure, say N.
\ No newline at end of file
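Callers cooperate with clock gating through a hold/release pair; the core.c
hunks later in this patch wrap every ios update in it, e.g. the
mmc_set_bus_mode() change (kernel context, shown here only as a
representative sketch):

    	mmc_host_clk_hold(host);	/* ungate and pin the bus clock */
    	host->ios.bus_mode = mode;	/* work that needs a running clock */
    	mmc_set_ios(host);
    	mmc_host_clk_release(host);	/* allow the clock to be gated again */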
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index abba078..ffeb27b 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -14,6 +14,7 @@
 obj-$(CONFIG_PWRSEQ_SD8787)	+= pwrseq_sd8787.o
 obj-$(CONFIG_PWRSEQ_EMMC)	+= pwrseq_emmc.o
 mmc_core-$(CONFIG_DEBUG_FS)	+= debugfs.o
+obj-$(CONFIG_MMC_RING_BUFFER)	+= ring_buffer.o
 obj-$(CONFIG_MMC_BLOCK)		+= mmc_block.o
 mmc_block-objs			:= block.o queue.o
 obj-$(CONFIG_MMC_TEST)		+= mmc_test.o
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index e201ccb..d77fce7 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -31,6 +31,7 @@
 #include <linux/cdev.h>
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
+#include <linux/bitops.h>
 #include <linux/string_helpers.h>
 #include <linux/delay.h>
 #include <linux/capability.h>
@@ -41,6 +42,7 @@
 
 #include <linux/mmc/ioctl.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/sd.h>
@@ -69,7 +71,7 @@
  * second software timer to timeout the whole request, so 10 seconds should be
  * ample.
  */
-#define MMC_BLK_TIMEOUT_MS  (10 * 1000)
+#define MMC_BLK_TIMEOUT_MS  (30 * 1000)
 #define MMC_SANITIZE_REQ_TIMEOUT 240000
 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
 #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
@@ -110,6 +112,7 @@
 	unsigned int	flags;
 #define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
 #define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
+#define MMC_BLK_PACKED_CMD	(1 << 2) /* MMC packed command support */
 
 	unsigned int	usage;
 	unsigned int	read_only;
@@ -120,7 +123,7 @@
 #define MMC_BLK_DISCARD		BIT(2)
 #define MMC_BLK_SECDISCARD	BIT(3)
 #define MMC_BLK_CQE_RECOVERY	BIT(4)
-
+#define MMC_BLK_FLUSH		BIT(5)
 	/*
 	 * Only set in main mmc_blk_data associated
 	 * with mmc_card with dev_set_drvdata, and keeps
@@ -210,9 +213,13 @@
 {
 	int ret;
 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
-	struct mmc_card *card = md->queue.card;
+	struct mmc_card *card;
 	int locked = 0;
 
+	if (!md)
+		return -EINVAL;
+
+	card = md->queue.card;
 	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
 		locked = 2;
 	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
@@ -241,6 +248,8 @@
 		return count;
 
 	md = mmc_blk_get(dev_to_disk(dev));
+	if (!md)
+		return -EINVAL;
 	mq = &md->queue;
 
 	/* Dispatch locking to the block layer */
@@ -276,6 +285,9 @@
 	int ret;
 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 
+	if (!md)
+		return -EINVAL;
+
 	ret = snprintf(buf, PAGE_SIZE, "%d\n",
 		       get_disk_ro(dev_to_disk(dev)) ^
 		       md->read_only);
@@ -290,6 +302,10 @@
 	char *end;
 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 	unsigned long set = simple_strtoul(buf, &end, 0);
+
+	if (!md)
+		return -EINVAL;
+
 	if (end == buf) {
 		ret = -EINVAL;
 		goto out;
@@ -445,11 +461,12 @@
 {
 	int err;
 
-	if (!mmc_can_sanitize(card)) {
-			pr_warn("%s: %s - SANITIZE is not supported\n",
+	if (!mmc_can_sanitize(card) &&
+			(card->host->caps2 & MMC_CAP2_SANITIZE)) {
+		pr_warn("%s: %s - SANITIZE is not supported\n",
 				mmc_hostname(card->host), __func__);
-			err = -EOPNOTSUPP;
-			goto out;
+		err = -EOPNOTSUPP;
+		goto out;
 	}
 
 	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
@@ -634,13 +651,13 @@
 	struct request *req;
 
 	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
-	if (IS_ERR(idata))
+	if (IS_ERR_OR_NULL(idata))
 		return PTR_ERR(idata);
 	/* This will be NULL on non-RPMB ioctl():s */
 	idata->rpmb = rpmb;
 
 	card = md->queue.card;
-	if (IS_ERR(card)) {
+	if (IS_ERR_OR_NULL(card)) {
 		err = PTR_ERR(card);
 		goto cmd_done;
 	}
@@ -851,7 +868,8 @@
 	int ret = 0;
 	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
 
-	if (main_md->part_curr == part_type)
+	if ((main_md->part_curr == part_type) &&
+	    (card->part_curr == part_type))
 		return 0;
 
 	if (mmc_card_mmc(card)) {
@@ -868,11 +886,15 @@
 				 EXT_CSD_PART_CONFIG, part_config,
 				 card->ext_csd.part_time);
 		if (ret) {
+			pr_err("%s: %s: switch failure, %d -> %d\n",
+				mmc_hostname(card->host), __func__,
+				main_md->part_curr, part_type);
 			mmc_blk_part_switch_post(card, part_type);
 			return ret;
 		}
 
 		card->ext_csd.part_config = part_config;
+		card->part_curr = part_type;
 
 		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
 	}
@@ -1028,8 +1050,15 @@
 
 	md->reset_done |= type;
 	err = mmc_hw_reset(host);
+	if (err && err != -EOPNOTSUPP) {
+		/* We failed to reset so we need to abort the request */
+		pr_err("%s: %s: failed to reset %d\n", mmc_hostname(host),
+				__func__, err);
+		return -ENODEV;
+	}
+
 	/* Ensure we switch back to the correct partition */
-	if (err != -EOPNOTSUPP) {
+	if (host->card) {
 		struct mmc_blk_data *main_md =
 			dev_get_drvdata(&host->card->dev);
 		int part_err;
@@ -1236,6 +1265,21 @@
 	int ret = 0;
 
 	ret = mmc_flush_cache(card);
+	if (ret == -ENODEV) {
+		pr_err("%s: %s: restart mmc card\n",
+				req->rq_disk->disk_name, __func__);
+		if (mmc_blk_reset(md, card->host, MMC_BLK_FLUSH))
+			pr_err("%s: %s: fail to restart mmc\n",
+				req->rq_disk->disk_name, __func__);
+		else
+			mmc_blk_reset_success(md, MMC_BLK_FLUSH);
+	}
+
+	if (ret) {
+		pr_err("%s: %s: notify flush error to upper layers\n",
+				req->rq_disk->disk_name, __func__);
+		ret = -EIO;
+	}
 	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
 }
 
@@ -2938,6 +2982,10 @@
 
 	dev_set_drvdata(&card->dev, md);
 
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 1);
+#endif
+
 	if (mmc_add_disk(md))
 		goto out;
 
@@ -2949,7 +2997,7 @@
 	/* Add two debugfs entries */
 	mmc_blk_add_debugfs(card, md);
 
-	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
+	pm_runtime_set_autosuspend_delay(&card->dev, MMC_AUTOSUSPEND_DELAY_MS);
 	pm_runtime_use_autosuspend(&card->dev);
 
 	/*
@@ -2986,6 +3034,9 @@
 	pm_runtime_put_noidle(&card->dev);
 	mmc_blk_remove_req(md);
 	dev_set_drvdata(&card->dev, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 0);
+#endif
 }
 
 static int _mmc_blk_suspend(struct mmc_card *card)
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index fc92c6c..94d9f84 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -134,6 +134,16 @@
 	struct mmc_host *host = card->host;
 	int ret;
 
+	if (!drv) {
+		pr_debug("%s: %s: drv is NULL\n", dev_name(dev), __func__);
+		return;
+	}
+
+	if (!card) {
+		pr_debug("%s: %s: card is NULL\n", dev_name(dev), __func__);
+		return;
+	}
+
 	if (dev->driver && drv->shutdown)
 		drv->shutdown(card);
 
@@ -156,6 +166,8 @@
 	if (ret)
 		return ret;
 
+	if (mmc_bus_needs_resume(host))
+		return 0;
 	ret = host->bus_ops->suspend(host);
 	if (ret)
 		pm_generic_resume(dev);
@@ -169,11 +181,17 @@
 	struct mmc_host *host = card->host;
 	int ret;
 
+	if (mmc_bus_manual_resume(host)) {
+		host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+		goto skip_full_resume;
+	}
+
 	ret = host->bus_ops->resume(host);
 	if (ret)
 		pr_warn("%s: error %d during resume (card was removed?)\n",
 			mmc_hostname(host), ret);
 
+skip_full_resume:
 	ret = pm_generic_resume(dev);
 	return ret;
 }
@@ -185,6 +203,9 @@
 	struct mmc_card *card = mmc_dev_to_card(dev);
 	struct mmc_host *host = card->host;
 
+	if (mmc_bus_needs_resume(host))
+		return 0;
+
 	return host->bus_ops->runtime_suspend(host);
 }
 
@@ -193,8 +214,12 @@
 	struct mmc_card *card = mmc_dev_to_card(dev);
 	struct mmc_host *host = card->host;
 
+	if (mmc_bus_needs_resume(host))
+		host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+
 	return host->bus_ops->runtime_resume(host);
 }
+
 #endif /* !CONFIG_PM */
 
 static const struct dev_pm_ops mmc_bus_pm_ops = {
@@ -278,6 +303,8 @@
 	card->dev.release = mmc_release_card;
 	card->dev.type = type;
 
+	spin_lock_init(&card->bkops.stats.lock);
+
 	return card;
 }
 
@@ -353,13 +380,19 @@
 #endif
 	card->dev.of_node = mmc_of_find_child_device(card->host, 0);
 
-	device_enable_async_suspend(&card->dev);
+	if (mmc_card_sdio(card)) {
+		ret = device_init_wakeup(&card->dev, true);
+		if (ret)
+			pr_err("%s: %s: failed to init wakeup: %d\n",
+			       mmc_hostname(card->host), __func__, ret);
+	}
 
 	ret = device_add(&card->dev);
 	if (ret)
 		return ret;
 
 	mmc_card_set_present(card);
+	device_enable_async_suspend(&card->dev);
 
 	return 0;
 }
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 72b0ef03..e321365 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -20,7 +20,7 @@
 static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf)	\
 {										\
 	struct mmc_card *card = mmc_dev_to_card(dev);				\
-	return sprintf(buf, fmt, args);						\
+	return snprintf(buf, PAGE_SIZE, fmt, args);			\
 }										\
 static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
 
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index 1170feb..7cde927 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -23,8 +23,9 @@
 #define MMC_STATE_BLOCKADDR	(1<<2)		/* card uses block-addressing */
 #define MMC_CARD_SDXC		(1<<3)		/* card is SDXC */
 #define MMC_CARD_REMOVED	(1<<4)		/* card has been removed */
-#define MMC_STATE_DOING_BKOPS	(1<<5)		/* card is doing BKOPS */
+#define MMC_STATE_DOING_BKOPS	(1<<5)		/* card is doing manual BKOPS */
 #define MMC_STATE_SUSPENDED	(1<<6)		/* card is suspended */
+#define MMC_STATE_AUTO_BKOPS	(1<<13)		/* card is doing auto BKOPS */
 
 #define mmc_card_present(c)	((c)->state & MMC_STATE_PRESENT)
 #define mmc_card_readonly(c)	((c)->state & MMC_STATE_READONLY)
@@ -33,6 +34,7 @@
 #define mmc_card_removed(c)	((c) && ((c)->state & MMC_CARD_REMOVED))
 #define mmc_card_doing_bkops(c)	((c)->state & MMC_STATE_DOING_BKOPS)
 #define mmc_card_suspended(c)	((c)->state & MMC_STATE_SUSPENDED)
+#define mmc_card_doing_auto_bkops(c)	((c)->state & MMC_STATE_AUTO_BKOPS)
 
 #define mmc_card_set_present(c)	((c)->state |= MMC_STATE_PRESENT)
 #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
@@ -43,6 +45,8 @@
 #define mmc_card_clr_doing_bkops(c)	((c)->state &= ~MMC_STATE_DOING_BKOPS)
 #define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
 #define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
+#define mmc_card_set_auto_bkops(c)	((c)->state |= MMC_STATE_AUTO_BKOPS)
+#define mmc_card_clr_auto_bkops(c)	((c)->state &= ~MMC_STATE_AUTO_BKOPS)
 
 /*
  * The world is not perfect and supplies us with broken mmc/sdio devices.
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 50a5c34..f50e922 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/completion.h>
+#include <linux/devfreq.h>
 #include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/pagemap.h>
@@ -29,13 +30,14 @@
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/jiffies.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/sd.h>
 #include <linux/mmc/slot-gpio.h>
-
+#include <linux/sched.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/mmc.h>
 
@@ -112,6 +114,671 @@
 
 #endif /* CONFIG_FAIL_MMC_REQUEST */
 
+static bool mmc_is_data_request(struct mmc_request *mmc_request)
+{
+	switch (mmc_request->cmd->opcode) {
+	case MMC_READ_SINGLE_BLOCK:
+	case MMC_READ_MULTIPLE_BLOCK:
+	case MMC_WRITE_BLOCK:
+	case MMC_WRITE_MULTIPLE_BLOCK:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
+{
+	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
+
+	if (!clk_scaling->enable)
+		return;
+
+	if (lock_needed)
+		spin_lock_bh(&clk_scaling->lock);
+
+	clk_scaling->start_busy = ktime_get();
+	clk_scaling->is_busy_started = true;
+
+	if (lock_needed)
+		spin_unlock_bh(&clk_scaling->lock);
+}
+
+static void mmc_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed)
+{
+	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
+
+	if (!clk_scaling->enable)
+		return;
+
+	if (lock_needed)
+		spin_lock_bh(&clk_scaling->lock);
+
+	if (!clk_scaling->is_busy_started) {
+		WARN_ON(1);
+		goto out;
+	}
+
+	clk_scaling->total_busy_time_us +=
+		ktime_to_us(ktime_sub(ktime_get(),
+			clk_scaling->start_busy));
+	pr_debug("%s: accumulated busy time is %lu usec\n",
+		mmc_hostname(host), clk_scaling->total_busy_time_us);
+	clk_scaling->is_busy_started = false;
+
+out:
+	if (lock_needed)
+		spin_unlock_bh(&clk_scaling->lock);
+}
+
+/**
+ * mmc_can_scale_clk() - Check clock scaling capability
+ * @host: pointer to mmc host structure
+ */
+bool mmc_can_scale_clk(struct mmc_host *host)
+{
+	if (!host) {
+		pr_err("bad host parameter\n");
+		WARN_ON(1);
+		return false;
+	}
+
+	return host->caps2 & MMC_CAP2_CLK_SCALE;
+}
+EXPORT_SYMBOL(mmc_can_scale_clk);
+
+static int mmc_devfreq_get_dev_status(struct device *dev,
+		struct devfreq_dev_status *status)
+{
+	struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
+	struct mmc_devfeq_clk_scaling *clk_scaling;
+
+	if (!host) {
+		pr_err("bad host parameter\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	clk_scaling = &host->clk_scaling;
+
+	if (!clk_scaling->enable)
+		return 0;
+
+	spin_lock_bh(&clk_scaling->lock);
+
+	/* accumulate the busy time of ongoing work */
+	memset(status, 0, sizeof(*status));
+	if (clk_scaling->is_busy_started) {
+		mmc_clk_scaling_stop_busy(host, false);
+		mmc_clk_scaling_start_busy(host, false);
+	}
+
+	status->busy_time = clk_scaling->total_busy_time_us;
+	status->total_time = ktime_to_us(ktime_sub(ktime_get(),
+		clk_scaling->measure_interval_start));
+	clk_scaling->total_busy_time_us = 0;
+	status->current_frequency = clk_scaling->curr_freq;
+	clk_scaling->measure_interval_start = ktime_get();
+
+	pr_debug("%s: status: load = %lu%% - total_time=%lu busy_time = %lu, clk=%lu\n",
+		mmc_hostname(host),
+		(status->busy_time*100)/status->total_time,
+		status->total_time, status->busy_time,
+		status->current_frequency);
+
+	spin_unlock_bh(&clk_scaling->lock);
+
+	return 0;
+}
+
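The load figure that the simple_ondemand governor derives from this status is
just the busy/total ratio over the last polling interval; a small standalone
illustration with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t busy_time_us  = 7500;    /* accumulated transfer time   */
        uint64_t total_time_us = 10000;   /* length of polling interval  */

        /* Same ratio as the pr_debug above: (busy * 100) / total */
        unsigned int load = (unsigned int)(busy_time_us * 100 / total_time_us);

        /* simple_ondemand scales up above upthreshold and down below
         * (upthreshold - downdifferential) */
        printf("load = %u%%\n", load);    /* prints: load = 75% */
        return 0;
    }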
+static bool mmc_is_valid_state_for_clk_scaling(struct mmc_host *host)
+{
+	struct mmc_card *card = host->card;
+	u32 status;
+
+	/*
+	 * If the current partition type is RPMB, clock switching may not
+	 * work properly as sending tuning command (CMD21) is illegal in
+	 * this mode.
+	 */
+	if (!card || (mmc_card_mmc(card) &&
+			(card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB ||
+			mmc_card_doing_bkops(card))))
+		return false;
+
+	if (mmc_send_status(card, &status)) {
+		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
+		return false;
+	}
+
+	return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
+}
+
+int mmc_clk_update_freq(struct mmc_host *host,
+		unsigned long freq, enum mmc_load state)
+{
+	int err = 0;
+
+	if (!host) {
+		pr_err("bad host parameter\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mmc_host_clk_hold(host);
+
+	/* make sure the card supports the frequency we want */
+	if (unlikely(freq > host->card->clk_scaling_highest)) {
+		freq = host->card->clk_scaling_highest;
+		pr_warn("%s: %s: frequency was overridden to %lu\n",
+				mmc_hostname(host), __func__,
+				host->card->clk_scaling_highest);
+	}
+
+	if (unlikely(freq < host->card->clk_scaling_lowest)) {
+		freq = host->card->clk_scaling_lowest;
+		pr_warn("%s: %s: frequency was overridden to %lu\n",
+			mmc_hostname(host), __func__,
+			host->card->clk_scaling_lowest);
+	}
+
+	if (freq == host->clk_scaling.curr_freq)
+		goto out;
+
+	if (host->ops->notify_load) {
+		err = host->ops->notify_load(host, state);
+		if (err) {
+			pr_err("%s: %s: fail on notify_load\n",
+				mmc_hostname(host), __func__);
+			goto out;
+		}
+	}
+
+	if (!mmc_is_valid_state_for_clk_scaling(host)) {
+		pr_debug("%s: invalid state for clock scaling - skipping\n",
+			mmc_hostname(host));
+		goto invalid_state;
+	}
+
+	err = host->bus_ops->change_bus_speed(host, &freq);
+	if (!err)
+		host->clk_scaling.curr_freq = freq;
+	else
+		pr_err("%s: %s: failed (%d) at freq=%lu\n",
+			mmc_hostname(host), __func__, err, freq);
+
+invalid_state:
+	if (err) {
+		/* restore previous state */
+		if (host->ops->notify_load)
+			if (host->ops->notify_load(host,
+				host->clk_scaling.state))
+				pr_err("%s: %s: fail on notify_load restore\n",
+					mmc_hostname(host), __func__);
+	}
+out:
+	mmc_host_clk_release(host);
+	return err;
+}
+EXPORT_SYMBOL(mmc_clk_update_freq);
+
+static int mmc_devfreq_set_target(struct device *dev,
+				unsigned long *freq, u32 devfreq_flags)
+{
+	struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
+	struct mmc_devfeq_clk_scaling *clk_scaling;
+	int err = 0;
+	int abort;
+	unsigned long pflags = current->flags;
+
+	/* Ensure scaling would happen even in memory pressure conditions */
+	current->flags |= PF_MEMALLOC;
+
+	if (!(host && freq)) {
+		pr_err("%s: unexpected host/freq parameter\n", __func__);
+		err = -EINVAL;
+		goto out;
+	}
+
+	clk_scaling = &host->clk_scaling;
+
+	if (!clk_scaling->enable)
+		goto out;
+
+	pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
+		*freq, current->comm);
+
+	spin_lock_bh(&clk_scaling->lock);
+	if (clk_scaling->target_freq == *freq ||
+		clk_scaling->skip_clk_scale_freq_update) {
+		spin_unlock_bh(&clk_scaling->lock);
+		goto out;
+	}
+
+	clk_scaling->need_freq_change = true;
+	clk_scaling->target_freq = *freq;
+	clk_scaling->state = *freq < clk_scaling->curr_freq ?
+		MMC_LOAD_LOW : MMC_LOAD_HIGH;
+	spin_unlock_bh(&clk_scaling->lock);
+
+	if (!clk_scaling->is_suspended && host->ios.clock)
+		abort = __mmc_claim_host(host, NULL,
+				&clk_scaling->devfreq_abort);
+	else
+		goto out;
+
+	if (abort)
+		goto out;
+
+	/*
+	 * In case we were able to claim host there is no need to
+	 * defer the frequency change. It will be done now
+	 */
+	clk_scaling->need_freq_change = false;
+
+	mmc_host_clk_hold(host);
+	err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
+	if (err && err != -EAGAIN)
+		pr_err("%s: clock scale to %lu failed with error %d\n",
+			mmc_hostname(host), *freq, err);
+	else
+		pr_debug("%s: clock change to %lu finished successfully (%s)\n",
+			mmc_hostname(host), *freq, current->comm);
+
+	mmc_host_clk_release(host);
+	mmc_release_host(host);
+out:
+	current->flags &= ~PF_MEMALLOC;
+	current->flags |= pflags & PF_MEMALLOC;
+	return err;
+}
+
+/**
+ * mmc_deferred_scaling() - scale clocks from data path (mmc thread context)
+ * @host: pointer to mmc host structure
+ *
+ * This function does clock scaling in case "need_freq_change" flag was set
+ * by the clock scaling logic.
+ */
+void mmc_deferred_scaling(struct mmc_host *host)
+{
+	unsigned long target_freq;
+	int err;
+	struct mmc_devfeq_clk_scaling clk_scaling;
+
+	if (!host->clk_scaling.enable)
+		return;
+
+	spin_lock_bh(&host->clk_scaling.lock);
+
+	if (!host->clk_scaling.need_freq_change) {
+		spin_unlock_bh(&host->clk_scaling.lock);
+		return;
+	}
+
+	atomic_inc(&host->clk_scaling.devfreq_abort);
+	target_freq = host->clk_scaling.target_freq;
+	/*
+	 * Store the clock scaling state while the lock is acquired so that
+	 * if devfreq context modifies clk_scaling, it will get reflected only
+	 * in the next deferred scaling check.
+	 */
+	clk_scaling = host->clk_scaling;
+	host->clk_scaling.need_freq_change = false;
+	spin_unlock_bh(&host->clk_scaling.lock);
+	pr_debug("%s: doing deferred frequency change (%lu) (%s)\n",
+				mmc_hostname(host),
+				target_freq, current->comm);
+
+	err = mmc_clk_update_freq(host, target_freq,
+		clk_scaling.state);
+	if (err && err != -EAGAIN)
+		pr_err("%s: failed on deferred scale clocks (%d)\n",
+			mmc_hostname(host), err);
+	else
+		pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
+			mmc_hostname(host),
+			target_freq, current->comm);
+
+	atomic_dec(&host->clk_scaling.devfreq_abort);
+}
+EXPORT_SYMBOL(mmc_deferred_scaling);
+
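The need_freq_change flag set in mmc_devfreq_set_target() is consumed from
the request path; the mmc_start_request() hunk further down calls this helper
as follows before starting a data transfer:

    	if (mmc_is_data_request(mrq)) {
    		mmc_deferred_scaling(host);	/* apply any pending frequency change */
    		mmc_clk_scaling_start_busy(host, true);
    	}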
+static int mmc_devfreq_create_freq_table(struct mmc_host *host)
+{
+	int i;
+	struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
+
+	pr_debug("%s: supported: lowest=%lu, highest=%lu\n",
+		mmc_hostname(host),
+		host->card->clk_scaling_lowest,
+		host->card->clk_scaling_highest);
+
+	/*
+	 * Create the frequency table and initialize it with default values.
+	 * Initialize it with platform specific frequencies if the frequency
+	 * table supplied by platform driver is present, otherwise initialize
+	 * it with min and max frequencies supported by the card.
+	 */
+	if (!clk_scaling->freq_table) {
+		if (clk_scaling->pltfm_freq_table_sz)
+			clk_scaling->freq_table_sz =
+				clk_scaling->pltfm_freq_table_sz;
+		else
+			clk_scaling->freq_table_sz = 2;
+
+		clk_scaling->freq_table = kzalloc(
+			(clk_scaling->freq_table_sz *
+			sizeof(*(clk_scaling->freq_table))), GFP_KERNEL);
+		if (!clk_scaling->freq_table)
+			return -ENOMEM;
+
+		if (clk_scaling->pltfm_freq_table) {
+			memcpy(clk_scaling->freq_table,
+				clk_scaling->pltfm_freq_table,
+				(clk_scaling->pltfm_freq_table_sz *
+				sizeof(*(clk_scaling->pltfm_freq_table))));
+		} else {
+			pr_debug("%s: no frequency table defined - setting default\n",
+				mmc_hostname(host));
+			clk_scaling->freq_table[0] =
+				host->card->clk_scaling_lowest;
+			clk_scaling->freq_table[1] =
+				host->card->clk_scaling_highest;
+			goto out;
+		}
+	}
+
+	if (host->card->clk_scaling_lowest >
+		clk_scaling->freq_table[0])
+		pr_debug("%s: frequency table undershot possible freq\n",
+			mmc_hostname(host));
+
+	for (i = 0; i < clk_scaling->freq_table_sz; i++) {
+		if (clk_scaling->freq_table[i] <=
+			host->card->clk_scaling_highest)
+			continue;
+		clk_scaling->freq_table[i] =
+			host->card->clk_scaling_highest;
+		clk_scaling->freq_table_sz = i + 1;
+		pr_debug("%s: frequency table overshot possible freq (%d)\n",
+				mmc_hostname(host), clk_scaling->freq_table[i]);
+		break;
+	}
+
+out:
+	/*
+	 * devfreq requires an unsigned long freq_table while the
+	 * freq_table in clk_scaling is u32. Allocate a separate array
+	 * for it here and release it when exiting clock scaling.
+	 */
+	clk_scaling->devfreq_profile.freq_table =  kzalloc(
+			clk_scaling->freq_table_sz *
+			sizeof(*(clk_scaling->devfreq_profile.freq_table)),
+			GFP_KERNEL);
+	if (!clk_scaling->devfreq_profile.freq_table)
+		return -ENOMEM;
+	clk_scaling->devfreq_profile.max_state = clk_scaling->freq_table_sz;
+
+	for (i = 0; i < clk_scaling->freq_table_sz; i++) {
+		clk_scaling->devfreq_profile.freq_table[i] =
+			clk_scaling->freq_table[i];
+		pr_debug("%s: freq[%d] = %u\n",
+			mmc_hostname(host), i, clk_scaling->freq_table[i]);
+	}
+
+	return 0;
+}
+
+/**
+ * mmc_init_clk_scaling() - Initialize clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * Initialize clock scaling for supported hosts. It is assumed that the
+ * caller ensures the clock is running at the maximum possible frequency
+ * before calling this function. Uses struct devfreq_simple_ondemand_data
+ * to configure the governor.
+ */
+int mmc_init_clk_scaling(struct mmc_host *host)
+{
+	int err;
+
+	if (!host || !host->card) {
+		pr_err("%s: unexpected host/card parameters\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (!mmc_can_scale_clk(host) ||
+		!host->bus_ops->change_bus_speed) {
+		pr_debug("%s: clock scaling is not supported\n",
+			mmc_hostname(host));
+		return 0;
+	}
+
+	pr_debug("registering %s dev (%pK) to devfreq\n",
+		mmc_hostname(host),
+		mmc_classdev(host));
+
+	if (host->clk_scaling.devfreq) {
+		pr_err("%s: dev is already registered for dev %pK\n",
+			mmc_hostname(host),
+			mmc_dev(host));
+		return -EPERM;
+	}
+	spin_lock_init(&host->clk_scaling.lock);
+	atomic_set(&host->clk_scaling.devfreq_abort, 0);
+	host->clk_scaling.curr_freq = host->ios.clock;
+	host->clk_scaling.need_freq_change = false;
+	host->clk_scaling.is_busy_started = false;
+
+	host->clk_scaling.devfreq_profile.polling_ms =
+		host->clk_scaling.polling_delay_ms;
+	host->clk_scaling.devfreq_profile.get_dev_status =
+		mmc_devfreq_get_dev_status;
+	host->clk_scaling.devfreq_profile.target = mmc_devfreq_set_target;
+	host->clk_scaling.devfreq_profile.initial_freq = host->ios.clock;
+
+	host->clk_scaling.ondemand_gov_data.simple_scaling = true;
+	host->clk_scaling.ondemand_gov_data.upthreshold =
+		host->clk_scaling.upthreshold;
+	host->clk_scaling.ondemand_gov_data.downdifferential =
+		host->clk_scaling.upthreshold - host->clk_scaling.downthreshold;
+
+	err = mmc_devfreq_create_freq_table(host);
+	if (err) {
+		pr_err("%s: fail to create devfreq frequency table\n",
+			mmc_hostname(host));
+		return err;
+	}
+
+	pr_debug("%s: adding devfreq with: upthreshold=%u downthreshold=%u polling=%u\n",
+		mmc_hostname(host),
+		host->clk_scaling.ondemand_gov_data.upthreshold,
+		host->clk_scaling.ondemand_gov_data.downdifferential,
+		host->clk_scaling.devfreq_profile.polling_ms);
+	host->clk_scaling.devfreq = devfreq_add_device(
+		mmc_classdev(host),
+		&host->clk_scaling.devfreq_profile,
+		"simple_ondemand",
+		&host->clk_scaling.ondemand_gov_data);
+	if (!host->clk_scaling.devfreq) {
+		pr_err("%s: unable to register with devfreq\n",
+			mmc_hostname(host));
+		return -EPERM;
+	}
+
+	pr_debug("%s: clk scaling is enabled for device %s (%pK) with devfreq %pK (clock = %uHz)\n",
+		mmc_hostname(host),
+		dev_name(mmc_classdev(host)),
+		mmc_classdev(host),
+		host->clk_scaling.devfreq,
+		host->ios.clock);
+
+	host->clk_scaling.enable = true;
+
+	return err;
+}
+EXPORT_SYMBOL(mmc_init_clk_scaling);
+
+/**
+ * mmc_suspend_clk_scaling() - suspend clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * This API will suspend the devfreq feature for the specific host.
+ * The statistics collected by mmc will be cleared.
+ * This function is intended to be called by the pm callbacks
+ * (e.g. runtime_suspend, suspend) of the mmc device.
+ */
+int mmc_suspend_clk_scaling(struct mmc_host *host)
+{
+	int err;
+
+	if (!host) {
+		WARN(1, "bad host parameter\n");
+		return -EINVAL;
+	}
+
+	if (!mmc_can_scale_clk(host) || !host->clk_scaling.enable ||
+			host->clk_scaling.is_suspended)
+		return 0;
+
+	if (!host->clk_scaling.devfreq) {
+		pr_err("%s: %s: no devfreq is associated with this device\n",
+			mmc_hostname(host), __func__);
+		return -EPERM;
+	}
+
+	atomic_inc(&host->clk_scaling.devfreq_abort);
+	wake_up(&host->wq);
+	err = devfreq_suspend_device(host->clk_scaling.devfreq);
+	if (err) {
+		pr_err("%s: %s: failed to suspend devfreq\n",
+			mmc_hostname(host), __func__);
+		return err;
+	}
+	host->clk_scaling.is_suspended = true;
+
+	host->clk_scaling.total_busy_time_us = 0;
+
+	pr_debug("%s: devfreq was removed\n", mmc_hostname(host));
+
+	return 0;
+}
+EXPORT_SYMBOL(mmc_suspend_clk_scaling);
+
+/**
+ * mmc_resume_clk_scaling() - resume clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * This API will resume the devfreq feature for the specific host.
+ * It is intended to be called by the pm callbacks
+ * (e.g. runtime_resume, resume) of the mmc device.
+ */
+int mmc_resume_clk_scaling(struct mmc_host *host)
+{
+	int err = 0;
+	u32 max_clk_idx = 0;
+	u32 devfreq_max_clk = 0;
+	u32 devfreq_min_clk = 0;
+
+	if (!host) {
+		WARN(1, "bad host parameter\n");
+		return -EINVAL;
+	}
+
+	if (!mmc_can_scale_clk(host))
+		return 0;
+
+	/*
+	 * If clock scaling is already exited when resume is called, like
+	 * during mmc shutdown, it is not an error and should not fail the
+	 * API calling this.
+	 */
+	if (!host->clk_scaling.devfreq) {
+		pr_warn("%s: %s: no devfreq is associated with this device\n",
+			mmc_hostname(host), __func__);
+		return 0;
+	}
+
+	atomic_set(&host->clk_scaling.devfreq_abort, 0);
+
+	max_clk_idx = host->clk_scaling.freq_table_sz - 1;
+	devfreq_max_clk = host->clk_scaling.freq_table[max_clk_idx];
+	devfreq_min_clk = host->clk_scaling.freq_table[0];
+
+	host->clk_scaling.curr_freq = devfreq_max_clk;
+	if (host->ios.clock < host->clk_scaling.freq_table[max_clk_idx])
+		host->clk_scaling.curr_freq = devfreq_min_clk;
+	host->clk_scaling.target_freq = host->clk_scaling.curr_freq;
+
+	err = devfreq_resume_device(host->clk_scaling.devfreq);
+	if (err) {
+		pr_err("%s: %s: failed to resume devfreq (%d)\n",
+			mmc_hostname(host), __func__, err);
+	} else {
+		host->clk_scaling.is_suspended = false;
+		pr_debug("%s: devfreq resumed\n", mmc_hostname(host));
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(mmc_resume_clk_scaling);
+
+/**
+ * mmc_exit_clk_scaling() - Disable clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * Disable clock scaling permanently.
+ */
+int mmc_exit_clk_scaling(struct mmc_host *host)
+{
+	int err;
+
+	if (!host) {
+		pr_err("%s: bad host parameter\n", __func__);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (!mmc_can_scale_clk(host))
+		return 0;
+
+	if (!host->clk_scaling.devfreq) {
+		pr_err("%s: %s: no devfreq is associated with this device\n",
+			mmc_hostname(host), __func__);
+		return -EPERM;
+	}
+
+	err = mmc_suspend_clk_scaling(host);
+	if (err) {
+		pr_err("%s: %s: failed to suspend clock scaling (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	err = devfreq_remove_device(host->clk_scaling.devfreq);
+	if (err) {
+		pr_err("%s: remove devfreq failed (%d)\n",
+			mmc_hostname(host), err);
+		return err;
+	}
+
+	kfree(host->clk_scaling.devfreq_profile.freq_table);
+
+	host->clk_scaling.devfreq = NULL;
+	atomic_set(&host->clk_scaling.devfreq_abort, 1);
+
+	kfree(host->clk_scaling.freq_table);
+	host->clk_scaling.freq_table = NULL;
+
+	pr_debug("%s: devfreq was removed\n", mmc_hostname(host));
+
+	return 0;
+}
+EXPORT_SYMBOL(mmc_exit_clk_scaling);
+
 static inline void mmc_complete_cmd(struct mmc_request *mrq)
 {
 	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
@@ -142,6 +809,12 @@
 {
 	struct mmc_command *cmd = mrq->cmd;
 	int err = cmd->error;
+#ifdef CONFIG_MMC_PERF_PROFILING
+	ktime_t diff;
+#endif
+
+	if (host->clk_scaling.is_busy_started)
+		mmc_clk_scaling_stop_busy(host, true);
 
 	/* Flag re-tuning needed on CRC errors */
 	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
@@ -192,6 +865,25 @@
 			cmd->resp[2], cmd->resp[3]);
 
 		if (mrq->data) {
+#ifdef CONFIG_MMC_PERF_PROFILING
+			if (host->perf_enable) {
+				diff = ktime_sub(ktime_get(),
+						host->perf.start);
+				if (mrq->data->flags == MMC_DATA_READ) {
+					host->perf.rbytes_drv +=
+						mrq->data->bytes_xfered;
+					host->perf.rtime_drv =
+						ktime_add(host->perf.rtime_drv,
+							diff);
+				} else {
+					host->perf.wbytes_drv +=
+						mrq->data->bytes_xfered;
+					host->perf.wtime_drv =
+						ktime_add(host->perf.wtime_drv,
+							diff);
+				}
+			}
+#endif
 			pr_debug("%s:     %d bytes transferred: %d\n",
 				mmc_hostname(host),
 				mrq->data->bytes_xfered, mrq->data->error);
@@ -204,6 +896,7 @@
 				mrq->stop->resp[0], mrq->stop->resp[1],
 				mrq->stop->resp[2], mrq->stop->resp[3]);
 		}
+		mmc_host_clk_release(host);
 	}
 	/*
 	 * Request starter must handle retries - see
@@ -282,8 +975,7 @@
 	}
 
 	if (mrq->data) {
-		pr_debug("%s:     blksz %d blocks %d flags %08x "
-			"tsac %d ms nsac %d\n",
+		pr_debug("%s: blksz %d blocks %d flags %08x tsac %d ms nsac %d\n",
 			mmc_hostname(host), mrq->data->blksz,
 			mrq->data->blocks, mrq->data->flags,
 			mrq->data->timeout_ns / 1000000,
@@ -329,6 +1021,10 @@
 			mrq->stop->error = 0;
 			mrq->stop->mrq = mrq;
 		}
+#ifdef CONFIG_MMC_PERF_PROFILING
+		if (host->perf_enable)
+			host->perf.start = ktime_get();
+#endif
 	}
 
 	return 0;
@@ -353,13 +1049,206 @@
 	if (err)
 		return err;
 
+	mmc_host_clk_hold(host);
 	led_trigger_event(host->led, LED_FULL);
+
+	if (mmc_is_data_request(mrq)) {
+		mmc_deferred_scaling(host);
+		mmc_clk_scaling_start_busy(host, true);
+	}
+
 	__mmc_start_request(host, mrq);
 
 	return 0;
 }
 EXPORT_SYMBOL(mmc_start_request);
 
+/**
+ *	mmc_blk_init_bkops_statistics - initialize bkops statistics
+ *	@card: MMC card to start BKOPS
+ *
+ *	Initialize and enable the bkops statistics
+ */
+void mmc_blk_init_bkops_statistics(struct mmc_card *card)
+{
+	int i;
+	struct mmc_bkops_stats *stats;
+
+	if (!card)
+		return;
+
+	stats = &card->bkops.stats;
+	spin_lock(&stats->lock);
+
+	stats->manual_start = 0;
+	stats->hpi = 0;
+	stats->auto_start = 0;
+	stats->auto_stop = 0;
+	for (i = 0; i < MMC_BKOPS_NUM_SEVERITY_LEVELS; i++)
+		stats->level[i] = 0;
+	stats->enabled = true;
+
+	spin_unlock(&stats->lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_bkops_statistics);
+
+static void mmc_update_bkops_start(struct mmc_bkops_stats *stats)
+{
+	spin_lock_irq(&stats->lock);
+	if (stats->enabled)
+		stats->manual_start++;
+	spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_auto_on(struct mmc_bkops_stats *stats)
+{
+	spin_lock_irq(&stats->lock);
+	if (stats->enabled)
+		stats->auto_start++;
+	spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_auto_off(struct mmc_bkops_stats *stats)
+{
+	spin_lock_irq(&stats->lock);
+	if (stats->enabled)
+		stats->auto_stop++;
+	spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_level(struct mmc_bkops_stats *stats,
+					unsigned int level)
+{
+	WARN_ON(level >= MMC_BKOPS_NUM_SEVERITY_LEVELS);
+	spin_lock_irq(&stats->lock);
+	if (stats->enabled)
+		stats->level[level]++;
+	spin_unlock_irq(&stats->lock);
+}
+
+/**
+ *	mmc_set_auto_bkops - set auto BKOPS for supported cards
+ *	@card: MMC card to start BKOPS
+ *	@enable: enable/disable flag
+ *
+ *	Configure the card to run automatic BKOPS.
+ *	Should be called with the host claimed.
+ */
+int mmc_set_auto_bkops(struct mmc_card *card, bool enable)
+{
+	int ret = 0;
+	u8 bkops_en;
+
+	WARN_ON(!card);
+	enable = !!enable;
+
+	if (unlikely(!mmc_card_support_auto_bkops(card))) {
+		pr_err("%s: %s: card doesn't support auto bkops\n",
+				mmc_hostname(card->host), __func__);
+		return -EPERM;
+	}
+
+	if (enable) {
+		if (mmc_card_doing_auto_bkops(card))
+			goto out;
+		bkops_en = card->ext_csd.auto_bkops_en | EXT_CSD_BKOPS_AUTO_EN;
+	} else {
+		if (!mmc_card_doing_auto_bkops(card))
+			goto out;
+		bkops_en = card->ext_csd.auto_bkops_en &
+				~EXT_CSD_BKOPS_AUTO_EN;
+	}
+
+	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN,
+			bkops_en, 0);
+	if (ret) {
+		pr_err("%s: %s: error in setting auto bkops to %d (%d)\n",
+			mmc_hostname(card->host), __func__, enable, ret);
+	} else {
+		if (enable) {
+			mmc_card_set_auto_bkops(card);
+			mmc_update_bkops_auto_on(&card->bkops.stats);
+		} else {
+			mmc_card_clr_auto_bkops(card);
+			mmc_update_bkops_auto_off(&card->bkops.stats);
+		}
+		card->ext_csd.auto_bkops_en = bkops_en;
+		pr_debug("%s: %s: bkops state %x\n",
+				mmc_hostname(card->host), __func__, bkops_en);
+	}
+out:
+	return ret;
+}
+EXPORT_SYMBOL(mmc_set_auto_bkops);
+
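A hedged usage sketch (the claim/error handling around the call is
illustrative only and not taken from this patch):

    	mmc_claim_host(card->host);
    	if (mmc_card_support_auto_bkops(card) &&
    	    mmc_set_auto_bkops(card, true))
    		pr_warn("%s: failed to enable auto bkops\n",
    			mmc_hostname(card->host));
    	mmc_release_host(card->host);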
+/**
+ *	mmc_check_bkops - check BKOPS for supported cards
+ *	@card: MMC card to check BKOPS
+ *
+ *	Read the BKOPS status in order to determine whether the
+ *	card requires bkops to be started.
+ */
+void mmc_check_bkops(struct mmc_card *card)
+{
+	int err;
+
+	WARN_ON(!card);
+
+	if (mmc_card_doing_bkops(card))
+		return;
+
+	err = mmc_read_bkops_status(card);
+	if (err) {
+		pr_err("%s: Failed to read bkops status: %d\n",
+		       mmc_hostname(card->host), err);
+		return;
+	}
+
+	card->bkops.needs_check = false;
+
+	mmc_update_bkops_level(&card->bkops.stats,
+				card->ext_csd.raw_bkops_status);
+
+	card->bkops.needs_bkops = card->ext_csd.raw_bkops_status > 0;
+}
+EXPORT_SYMBOL(mmc_check_bkops);
+
+/**
+ *	mmc_start_manual_bkops - start BKOPS for supported cards
+ *	@card: MMC card to start BKOPS
+ *
+ *	Send START_BKOPS to the card.
+ *	The function should be called with the host claimed.
+ */
+void mmc_start_manual_bkops(struct mmc_card *card)
+{
+	int err;
+
+	WARN_ON(!card);
+
+	if (unlikely(!mmc_card_configured_manual_bkops(card)))
+		return;
+
+	if (mmc_card_doing_bkops(card))
+		return;
+
+	mmc_retune_hold(card->host);
+
+	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_START,
+				1, 0, 0, false, true, false);
+	if (err) {
+		pr_err("%s: Error %d starting manual bkops\n",
+				mmc_hostname(card->host), err);
+	} else {
+		mmc_card_set_doing_bkops(card);
+		mmc_update_bkops_start(&card->bkops.stats);
+		card->bkops.needs_bkops = false;
+	}
+
+	mmc_retune_release(card->host);
+}
+EXPORT_SYMBOL(mmc_start_manual_bkops);
+
 static void mmc_wait_done(struct mmc_request *mrq)
 {
 	complete(&mrq->completion);
@@ -401,20 +1290,21 @@
 	struct mmc_command *cmd;
 
 	while (1) {
-		wait_for_completion(&mrq->completion);
+		wait_for_completion_io(&mrq->completion);
 
 		cmd = mrq->cmd;
 
 		/*
-		 * If host has timed out waiting for the sanitize
+		 * If host has timed out waiting for the sanitize/bkops
 		 * to complete, card might be still in programming state
 		 * so let's try to bring the card out of programming
 		 * state.
 		 */
-		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
+		if ((cmd->bkops_busy || cmd->sanitize_busy) &&
+				cmd->error == -ETIMEDOUT) {
 			if (!mmc_interrupt_hpi(host->card)) {
-				pr_warn("%s: %s: Interrupted sanitize\n",
-					mmc_hostname(host), __func__);
+				pr_warn("%s: %s: Interrupted sanitize/bkops\n",
+					   mmc_hostname(host), __func__);
 				cmd->error = 0;
 				break;
 			} else {
@@ -620,6 +1510,10 @@
  */
 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 {
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	if (mmc_bus_needs_resume(host))
+		mmc_resume_bus(host);
+#endif
 	__mmc_start_req(host, mrq);
 
 	if (!mrq->cap_cmd_during_tfr)
@@ -668,6 +1562,10 @@
 {
 	unsigned int mult;
 
+	if (!card) {
+		WARN_ON(1);
+		return;
+	}
 	/*
 	 * SDIO cards only define an upper 1 s limit on access.
 	 */
@@ -699,9 +1597,9 @@
 		unsigned int timeout_us, limit_us;
 
 		timeout_us = data->timeout_ns / 1000;
-		if (card->host->ios.clock)
+		if (mmc_host_clk_rate(card->host))
 			timeout_us += data->timeout_clks * 1000 /
-				(card->host->ios.clock / 1000);
+				(mmc_host_clk_rate(card->host) / 1000);
 
 		if (data->flags & MMC_DATA_WRITE)
 			/*
@@ -734,9 +1632,11 @@
 	 * Address this by setting the read timeout to a "reasonably high"
 	 * value. For the cards tested, 600ms has proven enough. If necessary,
 	 * this value can be increased if other problematic cards require this.
+	 * Certain Hynix 5.x cards report read timeouts even with 300 ms,
+	 * so increase further to the max value (4 s).
 	 */
 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
-		data->timeout_ns = 600000000;
+		data->timeout_ns = 4000000000u;
 		data->timeout_clks = 0;
 	}
 
@@ -755,6 +1655,11 @@
 				data->timeout_ns =  100000000;	/* 100ms */
 		}
 	}
+	/* Increase the timeout values for some bad INAND MCP devices */
+	if (card->quirks & MMC_QUIRK_INAND_DATA_TIMEOUT) {
+		data->timeout_ns = 4000000000u; /* 4s */
+		data->timeout_clks = 0;
+	}
 }
 EXPORT_SYMBOL(mmc_set_data_timeout);
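The arithmetic above converts the card's TAAC (nanosecond) and NSAC (clock)
timeout components into microseconds; a standalone worked example with
hypothetical CSD-derived values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int timeout_ns   = 30000000;   /* TAAC part: 30 ms        */
        unsigned int timeout_clks = 1000;       /* NSAC part, in clocks    */
        unsigned int clock_hz     = 50000000;   /* bus running at 50 MHz   */

        /* Same conversion as mmc_set_data_timeout() */
        unsigned int timeout_us = timeout_ns / 1000 +
                                  timeout_clks * 1000 / (clock_hz / 1000);

        printf("timeout = %u us\n", timeout_us);  /* 30000 + 20 = 30020 */
        return 0;
    }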
 
@@ -856,14 +1761,60 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 	remove_wait_queue(&host->wq, &wait);
 
-	if (pm)
+	if (pm) {
+		mmc_host_clk_hold(host);
 		pm_runtime_get_sync(mmc_dev(host));
+	}
+
+	if (host->ops->enable && !stop && host->claim_cnt == 1)
+		host->ops->enable(host);
 
 	return stop;
 }
 EXPORT_SYMBOL(__mmc_claim_host);
 
 /**
+ *	mmc_try_claim_host - try to claim a host exclusively, retrying
+ *	with a 10 ms gap for up to the given time
+ *	@host: mmc host to claim
+ *	@delay_ms: total time to keep trying, in ms
+ *
+ *	Returns %1 if the host is claimed, %0 otherwise.
+ */
+int mmc_try_claim_host(struct mmc_host *host, unsigned int delay_ms)
+{
+	int claimed_host = 0;
+	unsigned long flags;
+	int retry_cnt = delay_ms / 10;
+	bool pm = false;
+
+	do {
+		spin_lock_irqsave(&host->lock, flags);
+		if (!host->claimed || host->claimer->task == current) {
+			host->claimed = 1;
+			host->claimer->task = current;
+			host->claim_cnt += 1;
+			claimed_host = 1;
+			if (host->claim_cnt == 1)
+				pm = true;
+		}
+		spin_unlock_irqrestore(&host->lock, flags);
+		if (!claimed_host)
+			mmc_delay(10);
+	} while (!claimed_host && retry_cnt--);
+
+	if (pm) {
+		mmc_host_clk_hold(host);
+		pm_runtime_get_sync(mmc_dev(host));
+	}
+
+	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
+		host->ops->enable(host);
+	return claimed_host;
+}
+EXPORT_SYMBOL(mmc_try_claim_host);
+
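A usage sketch (kernel context; the 100 ms budget is an arbitrary example):
because the helper polls every 10 ms instead of sleeping on host->wq, callers
that must not block indefinitely can bail out:

    	if (!mmc_try_claim_host(host, 100))	/* give up after ~100 ms */
    		return -EBUSY;
    	/* ... issue commands while the host is claimed ... */
    	mmc_release_host(host);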
+/**
  *	mmc_release_host - release a host
  *	@host: mmc host to release
  *
@@ -876,6 +1827,9 @@
 
 	WARN_ON(!host->claimed);
 
+	if (host->ops->disable && host->claim_cnt == 1)
+		host->ops->disable(host);
+
 	spin_lock_irqsave(&host->lock, flags);
 	if (--host->claim_cnt) {
 		/* Release for nested claim */
@@ -888,6 +1842,7 @@
 		wake_up(&host->wq);
 		pm_runtime_mark_last_busy(mmc_dev(host));
 		pm_runtime_put_autosuspend(mmc_dev(host));
+		mmc_host_clk_release(host);
 	}
 }
 EXPORT_SYMBOL(mmc_release_host);
@@ -900,6 +1855,10 @@
 {
 	pm_runtime_get_sync(&card->dev);
 	__mmc_claim_host(card->host, ctx, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	if (mmc_bus_needs_resume(card->host))
+		mmc_resume_bus(card->host);
+#endif
 }
 EXPORT_SYMBOL(mmc_get_card);
 
@@ -933,7 +1892,23 @@
 		 ios->power_mode, ios->chip_select, ios->vdd,
 		 1 << ios->bus_width, ios->timing);
 
+	if (ios->clock > 0)
+		mmc_set_ungated(host);
 	host->ops->set_ios(host, ios);
+	if (ios->old_rate != ios->clock) {
+		if (likely(ios->clk_ts)) {
+			char trace_info[80];
+
+			snprintf(trace_info, 80,
+				"%s: freq_KHz %d --> %d | t = %d",
+				mmc_hostname(host), ios->old_rate / 1000,
+				ios->clock / 1000, jiffies_to_msecs(
+					(long)jiffies - (long)ios->clk_ts));
+			trace_mmc_clk(trace_info);
+		}
+		ios->old_rate = ios->clock;
+		ios->clk_ts = jiffies;
+	}
 }
 
 /*
@@ -941,15 +1916,17 @@
  */
 void mmc_set_chip_select(struct mmc_host *host, int mode)
 {
+	mmc_host_clk_hold(host);
 	host->ios.chip_select = mode;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
  * Sets the host clock to the highest possible frequency that
  * is below "hz".
  */
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
 {
 	WARN_ON(hz && hz < host->f_min);
 
@@ -960,6 +1937,74 @@
 	mmc_set_ios(host);
 }
 
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+	mmc_host_clk_hold(host);
+	__mmc_set_clock(host, hz);
+	mmc_host_clk_release(host);
+}
+
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	WARN_ON(!host->ios.clock);
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_old = host->ios.clock;
+	host->ios.clock = 0;
+	host->clk_gated = true;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+	/*
+	 * We should previously have gated the clock, so the clock shall
+	 * be 0 here! The clock may however be 0 during initialization,
+	 * when some request operations are performed before setting
+	 * the frequency. When ungate is requested in that situation
+	 * we just ignore the call.
+	 */
+	if (host->clk_old) {
+		WARN_ON(host->ios.clock);
+		/* This call will also set host->clk_gated to false */
+		__mmc_set_clock(host, host->clk_old);
+	}
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	/*
+	 * We've been given a new frequency while the clock is gated,
+	 * so make sure we regard this as ungating it.
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_gated = false;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+
+void mmc_gate_clock(struct mmc_host *host)
+{
+}
+#endif
+
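Taken together, the gate/ungate pair lets a host drop the bus clock to 0 Hz
while idle and restore the cached rate before the next request; an
illustrative sequence (kernel context, not standalone):

    	mmc_gate_clock(host);	/* ios.clock -> 0, old rate cached in clk_old */
    	/* ... bus idle: logic and clock noise powered down ... */
    	mmc_ungate_clock(host);	/* restore cached rate via __mmc_set_clock() */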
 int mmc_execute_tuning(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
@@ -977,7 +2022,9 @@
 	else
 		opcode = MMC_SEND_TUNING_BLOCK;
 
+	mmc_host_clk_hold(host);
 	err = host->ops->execute_tuning(host, opcode);
+	mmc_host_clk_release(host);
 
 	if (err)
 		pr_err("%s: tuning execution failed: %d\n",
@@ -993,8 +2040,10 @@
  */
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 {
+	mmc_host_clk_hold(host);
 	host->ios.bus_mode = mode;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
@@ -1002,8 +2051,10 @@
  */
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 {
+	mmc_host_clk_hold(host);
 	host->ios.bus_width = width;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
@@ -1471,8 +2522,11 @@
 	int old_signal_voltage = host->ios.signal_voltage;
 
 	host->ios.signal_voltage = signal_voltage;
-	if (host->ops->start_signal_voltage_switch)
+	if (host->ops->start_signal_voltage_switch) {
+		mmc_host_clk_hold(host);
 		err = host->ops->start_signal_voltage_switch(host, &host->ios);
+		mmc_host_clk_release(host);
+	}
 
 	if (err)
 		host->ios.signal_voltage = old_signal_voltage;
@@ -1519,6 +2573,7 @@
 {
 	struct mmc_command cmd = {};
 	int err = 0;
+	u32 clock;
 
 	/*
 	 * If we cannot switch voltages, return failure so the caller
@@ -1534,13 +2589,19 @@
 	cmd.arg = 0;
 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 
+	/*
+	 * Hold the clock reference so clock doesn't get auto gated during this
+	 * voltage switch sequence.
+	 */
+	mmc_host_clk_hold(host);
 	err = mmc_wait_for_cmd(host, &cmd, 0);
 	if (err)
-		return err;
+		goto err_command;
 
-	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
-		return -EIO;
-
+	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
+		err = -EIO;
+		goto err_command;
+	}
 	/*
 	 * The card should drive cmd and dat[0:3] low immediately
 	 * after the response of cmd11, but wait 1 ms to be sure
@@ -1550,16 +2611,33 @@
 		err = -EAGAIN;
 		goto power_cycle;
 	}
+	/*
+	 * During a signal voltage level switch, the clock must be gated
+	 * for 5 ms according to the SD spec
+	 */
+	host->card_clock_off = true;
+	clock = host->ios.clock;
+	host->ios.clock = 0;
+	mmc_set_ios(host);
 
-	if (mmc_host_set_uhs_voltage(host)) {
+	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
 		/*
 		 * Voltages may not have been switched, but we've already
 		 * sent CMD11, so a power cycle is required anyway
 		 */
 		err = -EAGAIN;
+		host->ios.clock = clock;
+		mmc_set_ios(host);
+		host->card_clock_off = false;
 		goto power_cycle;
 	}
 
+	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
+	mmc_delay(10);
+	host->ios.clock = clock;
+	mmc_set_ios(host);
+
+	host->card_clock_off = false;
 	/* Wait for at least 1 ms according to spec */
 	mmc_delay(1);
 
@@ -1577,6 +2655,9 @@
 		mmc_power_cycle(host, ocr);
 	}
 
+err_command:
+	mmc_host_clk_release(host);
+
 	return err;
 }
 
@@ -1585,8 +2666,10 @@
  */
 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
 {
+	mmc_host_clk_hold(host);
 	host->ios.timing = timing;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
@@ -1594,8 +2677,10 @@
  */
 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
 {
+	mmc_host_clk_hold(host);
 	host->ios.drv_type = drv_type;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
@@ -1603,6 +2688,7 @@
 {
 	struct mmc_host *host = card->host;
 	int host_drv_type = SD_DRIVER_TYPE_B;
+	int drive_strength;
 
 	*drv_type = 0;
 
@@ -1625,10 +2711,14 @@
 	 * information and let the hardware specific code
 	 * return what is possible given the options
 	 */
-	return host->ops->select_drive_strength(card, max_dtr,
-						host_drv_type,
-						card_drv_type,
-						drv_type);
+	mmc_host_clk_hold(host);
+	drive_strength = host->ops->select_drive_strength(card, max_dtr,
+							  host_drv_type,
+							  card_drv_type,
+							  drv_type);
+	mmc_host_clk_release(host);
+
+	return drive_strength;
 }
 
 /*
@@ -1647,6 +2737,8 @@
 	if (host->ios.power_mode == MMC_POWER_ON)
 		return;
 
+	mmc_host_clk_hold(host);
+
 	mmc_pwrseq_pre_power_on(host);
 
 	host->ios.vdd = fls(ocr) - 1;
@@ -1674,6 +2766,8 @@
 	 * time required to reach a stable voltage.
 	 */
 	mmc_delay(host->ios.power_delay_ms);
+
+	mmc_host_clk_release(host);
 }
 
 void mmc_power_off(struct mmc_host *host)
@@ -1681,6 +2775,8 @@
 	if (host->ios.power_mode == MMC_POWER_OFF)
 		return;
 
+	mmc_host_clk_hold(host);
+
 	mmc_pwrseq_power_off(host);
 
 	host->ios.clock = 0;
@@ -1696,6 +2792,8 @@
 	 * can be successfully turned on again.
 	 */
 	mmc_delay(1);
+
+	mmc_host_clk_release(host);
 }
 
 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
@@ -1743,6 +2841,47 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+int mmc_resume_bus(struct mmc_host *host)
+{
+	unsigned long flags;
+	int err = 0;
+
+	if (!mmc_bus_needs_resume(host))
+		return -EINVAL;
+
+	pr_debug("%s: Starting deferred resume\n", mmc_hostname(host));
+	spin_lock_irqsave(&host->lock, flags);
+	host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	mmc_bus_get(host);
+	if (host->bus_ops && !host->bus_dead && host->card) {
+		mmc_power_up(host, host->card->ocr);
+		WARN_ON(!host->bus_ops->resume);
+		err = host->bus_ops->resume(host);
+		if (err) {
+			pr_err("%s: %s: resume failed: %d\n",
+				       mmc_hostname(host), __func__, err);
+			/*
+			 * If we have cd-gpio based detection mechanism and
+			 * deferred resume is supported, we will not detect
+			 * card removal event when system is suspended. So if
+			 * resume fails after a system suspend/resume,
+			 * schedule the work to detect card presence.
+			 */
+			if (mmc_card_is_removable(host) &&
+					!(host->caps & MMC_CAP_NEEDS_POLL)) {
+				mmc_detect_change(host, 0);
+			}
+		}
+	}
+
+	mmc_bus_put(host);
+	pr_debug("%s: Deferred resume completed\n", mmc_hostname(host));
+	return 0;
+}
+EXPORT_SYMBOL(mmc_resume_bus);
+
 /*
  * Assign a mmc bus handler to a host. Only one bus handler may control a
  * host at any given time.
@@ -1796,6 +2935,16 @@
 		pm_wakeup_event(mmc_dev(host), 5000);
 
 	host->detect_change = 1;
+	/*
+	 * The cd_gpio state changed, so make sure detection is not
+	 * overridden because of manual resume.
+	 */
+	if (cd_irq && mmc_bus_manual_resume(host))
+		host->ignore_bus_resume_flags = true;
+
+	if (delayed_work_pending(&host->detect))
+		cancel_delayed_work(&host->detect);
+
 	mmc_schedule_delayed_work(&host->detect, delay);
 }
 
@@ -1895,7 +3044,7 @@
 		 */
 		timeout_clks <<= 1;
 		timeout_us += (timeout_clks * 1000) /
-			      (card->host->ios.clock / 1000);
+			      (mmc_host_clk_rate(card->host) / 1000);
 
 		erase_timeout = timeout_us / 1000;
 
@@ -2156,20 +3305,9 @@
 	return nr_new;
 }
 
-/**
- * mmc_erase - erase sectors.
- * @card: card to erase
- * @from: first sector to erase
- * @nr: number of sectors to erase
- * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
- *
- * Caller must claim host before calling this function.
- */
-int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
-	      unsigned int arg)
+int mmc_erase_sanity_check(struct mmc_card *card, unsigned int from,
+		unsigned int nr, unsigned int arg)
 {
-	unsigned int rem, to = from + nr;
-	int err;
 
 	if (!(card->host->caps & MMC_CAP_ERASE) ||
 	    !(card->csd.cmdclass & CCC_ERASE))
@@ -2193,6 +3331,27 @@
 		if (from % card->erase_size || nr % card->erase_size)
 			return -EINVAL;
 	}
+	return 0;
+}
+
+/**
+ * mmc_erase - erase sectors.
+ * @card: card to erase
+ * @from: first sector to erase
+ * @nr: number of sectors to erase
+ * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
+ *
+ * Caller must claim host before calling this function.
+ */
+int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
+	      unsigned int arg)
+{
+	unsigned int rem, to = from + nr;
+	int err;
+
+	err = mmc_erase_sanity_check(card, from, nr, arg);
+	if (err)
+		return err;
 
 	if (arg == MMC_ERASE_ARG)
 		nr = mmc_align_erase_size(card, &from, &to, nr);
@@ -2369,6 +3528,10 @@
 	struct mmc_host *host = card->host;
 	unsigned int max_discard, max_trim;
 
+	if (!host->max_busy_timeout ||
+		(host->caps2 & MMC_CAP2_MAX_DISCARD_SIZE))
+		return UINT_MAX;
+
 	/*
 	 * Without erase_group_def set, MMC erase timeout depends on clock
 	 * frequency which can change.  In that case, the best choice is
@@ -2433,7 +3596,9 @@
 
 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
 		return;
+	mmc_host_clk_hold(host);
 	host->ops->hw_reset(host);
+	mmc_host_clk_release(host);
 }
 
 int mmc_hw_reset(struct mmc_host *host)
@@ -2597,14 +3762,31 @@
 }
 EXPORT_SYMBOL(mmc_detect_card_removed);
 
+/*
+ * This should be called to make sure that the detect work (mmc_rescan)
+ * is completed. Drivers may use this function from async schedule/probe
+ * contexts to make sure that boot device detection is completed on
+ * completion of async_schedule.
+ */
+void mmc_flush_detect_work(struct mmc_host *host)
+{
+	flush_delayed_work(&host->detect);
+}
+EXPORT_SYMBOL(mmc_flush_detect_work);
+
 void mmc_rescan(struct work_struct *work)
 {
+	unsigned long flags;
 	struct mmc_host *host =
 		container_of(work, struct mmc_host, detect.work);
 	int i;
 
-	if (host->rescan_disable)
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->rescan_disable) {
+		spin_unlock_irqrestore(&host->lock, flags);
 		return;
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	/* If there is a non-removable card registered, only scan once */
 	if (!mmc_card_is_removable(host) && host->rescan_entered)
@@ -2628,6 +3810,8 @@
 		host->bus_ops->detect(host);
 
 	host->detect_change = 0;
+	if (host->ignore_bus_resume_flags)
+		host->ignore_bus_resume_flags = false;
 
 	/*
 	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
@@ -2662,6 +3846,7 @@
 		if (freqs[i] <= host->f_min)
 			break;
 	}
+	host->err_stats[MMC_ERR_CMD_TIMEOUT] = 0;
 	mmc_release_host(host);
 
  out:
@@ -2671,17 +3856,17 @@
 
 void mmc_start_host(struct mmc_host *host)
 {
+	mmc_claim_host(host);
 	host->f_init = max(freqs[0], host->f_min);
 	host->rescan_disable = 0;
 	host->ios.power_mode = MMC_POWER_UNDEFINED;
 
-	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)) {
-		mmc_claim_host(host);
+	if (!(host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP))
 		mmc_power_up(host, host->ocr_avail);
-		mmc_release_host(host);
-	}
 
 	mmc_gpiod_request_cd_irq(host);
+	mmc_register_extcon(host);
+	mmc_release_host(host);
 	_mmc_detect_change(host, 0, false);
 }
 
@@ -2711,6 +3896,8 @@
 	}
 	mmc_bus_put(host);
 
+	mmc_unregister_extcon(host);
+
 	mmc_claim_host(host);
 	mmc_power_off(host);
 	mmc_release_host(host);
@@ -2770,6 +3957,11 @@
 
 		spin_lock_irqsave(&host->lock, flags);
 		host->rescan_disable = 0;
+		if (mmc_bus_manual_resume(host) &&
+				!host->ignore_bus_resume_flags) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			break;
+		}
 		spin_unlock_irqrestore(&host->lock, flags);
 		_mmc_detect_change(host, 0, false);
 
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 087ba68..c20bbe8 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -32,6 +32,7 @@
 	int (*shutdown)(struct mmc_host *);
 	int (*hw_reset)(struct mmc_host *);
 	int (*sw_reset)(struct mmc_host *);
+	int (*change_bus_speed)(struct mmc_host *host, unsigned long *freq);
 };
 
 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -44,6 +45,11 @@
 
 void mmc_set_chip_select(struct mmc_host *host, int mode);
 void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+int mmc_clk_update_freq(struct mmc_host *host,
+		unsigned long freq, enum mmc_load state);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
@@ -64,6 +70,8 @@
 {
 	if (ms <= 20)
 		usleep_range(ms * 1000, ms * 1250);
+	else if (ms < jiffies_to_msecs(2))
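+		/* still under two jiffies: msleep() granularity would oversleep */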
+		usleep_range(ms * 1000, (ms + 1) * 1000);
 	else
 		msleep(ms);
 }
@@ -89,6 +97,12 @@
 void mmc_add_card_debugfs(struct mmc_card *card);
 void mmc_remove_card_debugfs(struct mmc_card *card);
 
+extern bool mmc_can_scale_clk(struct mmc_host *host);
+extern int mmc_init_clk_scaling(struct mmc_host *host);
+extern int mmc_resume_clk_scaling(struct mmc_host *host);
+extern int mmc_exit_clk_scaling(struct mmc_host *host);
+extern unsigned long mmc_get_max_frequency(struct mmc_host *host);
+
 int mmc_execute_tuning(struct mmc_card *card);
 int mmc_hs200_to_hs400(struct mmc_card *card);
 int mmc_hs400_to_hs200(struct mmc_card *card);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index d2275c5..14b590f 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -33,6 +33,26 @@
 #endif /* CONFIG_FAIL_MMC_REQUEST */
 
 /* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */
+static int mmc_ring_buffer_show(struct seq_file *s, void *data)
+{
+	struct mmc_host *mmc = s->private;
+
+	mmc_dump_trace_buffer(mmc, s);
+	return 0;
+}
+
+static int mmc_ring_buffer_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mmc_ring_buffer_show, inode->i_private);
+}
+
+static const struct file_operations mmc_ring_buffer_fops = {
+	.open		= mmc_ring_buffer_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
 static int mmc_ios_show(struct seq_file *s, void *data)
 {
 	static const char *vdd_str[] = {
@@ -225,6 +245,205 @@
 DEFINE_SIMPLE_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
 	"%llu\n");
 
+#include <linux/delay.h>
+
+static int mmc_scale_get(void *data, u64 *val)
+{
+	struct mmc_host *host = data;
+
+	*val = host->clk_scaling.curr_freq;
+
+	return 0;
+}
+
+static int mmc_scale_set(void *data, u64 val)
+{
+	int err = 0;
+	struct mmc_host *host = data;
+
+	mmc_claim_host(host);
+	mmc_host_clk_hold(host);
+
+	/* change frequency from sysfs manually */
+	err = mmc_clk_update_freq(host, val, host->clk_scaling.state);
+	if (err == -EAGAIN)
+		err = 0;
+	else if (err)
+		pr_err("%s: clock scale to %llu failed with error %d\n",
+			mmc_hostname(host), val, err);
+	else
+		pr_debug("%s: clock change to %llu finished successfully (%s)\n",
+			mmc_hostname(host), val, current->comm);
+
+	mmc_host_clk_release(host);
+	mmc_release_host(host);
+
+	return err;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_scale_fops, mmc_scale_get, mmc_scale_set,
+	"%llu\n");
+
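+/*
+ * Typical use (the debugfs path depends on the host name, e.g. mmc0):
+ *   cat /sys/kernel/debug/mmc0/scale
+ *   echo 52000000 > /sys/kernel/debug/mmc0/scale
+ */
+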
+static int mmc_max_clock_get(void *data, u64 *val)
+{
+	struct mmc_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	*val = host->f_max;
+
+	return 0;
+}
+
+static int mmc_max_clock_set(void *data, u64 val)
+{
+	struct mmc_host *host = data;
+	int err = -EINVAL;
+	unsigned long freq = val;
+	unsigned int old_freq;
+
+	if (!host || (val < host->f_min))
+		goto out;
+
+	mmc_claim_host(host);
+	if (host->bus_ops && host->bus_ops->change_bus_speed) {
+		old_freq = host->f_max;
+		host->f_max = freq;
+
+		err = host->bus_ops->change_bus_speed(host, &freq);
+
+		if (err)
+			host->f_max = old_freq;
+	}
+	mmc_release_host(host);
+out:
+	return err;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_max_clock_fops, mmc_max_clock_get,
+		mmc_max_clock_set, "%llu\n");
+
+static int mmc_force_err_set(void *data, u64 val)
+{
+	struct mmc_host *host = data;
+
+	if (host && host->card && host->ops &&
+			host->ops->force_err_irq) {
+		/*
+		 * To access the force error irq reg, we need to make
+		 * sure the host is powered up and host clock is ticking.
+		 */
+		mmc_get_card(host->card, NULL);
+		host->ops->force_err_irq(host, val);
+		mmc_put_card(host->card, NULL);
+	}
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_force_err_fops, NULL, mmc_force_err_set, "%llu\n");
+
+static int mmc_err_state_get(void *data, u64 *val)
+{
+	struct mmc_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	*val = host->err_occurred ? 1 : 0;
+
+	return 0;
+}
+
+static int mmc_err_state_clear(void *data, u64 val)
+{
+	struct mmc_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	host->err_occurred = false;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(mmc_err_state, mmc_err_state_get,
+		mmc_err_state_clear, "%llu\n");
+
+static int mmc_err_stats_show(struct seq_file *file, void *data)
+{
+	struct mmc_host *host = (struct mmc_host *)file->private;
+
+	if (!host)
+		return -EINVAL;
+
+	seq_printf(file, "# Command Timeout Occurred:\t %d\n",
+		   host->err_stats[MMC_ERR_CMD_TIMEOUT]);
+
+	seq_printf(file, "# Command CRC Errors Occurred:\t %d\n",
+		   host->err_stats[MMC_ERR_CMD_CRC]);
+
+	seq_printf(file, "# Data Timeout Occurred:\t %d\n",
+		   host->err_stats[MMC_ERR_DAT_TIMEOUT]);
+
+	seq_printf(file, "# Data CRC Errors Occurred:\t %d\n",
+		   host->err_stats[MMC_ERR_DAT_CRC]);
+
+	seq_printf(file, "# Auto-Cmd Error Occurred:\t %d\n",
+		   host->err_stats[MMC_ERR_AUTO_CMD]);
+
+	seq_printf(file, "# ADMA Error Occurred:\t %d\n",
+		   host->err_stats[MMC_ERR_ADMA]);
+
+	seq_printf(file, "# Tuning Error Occurred:\t %d\n",
+		   host->err_stats[MMC_ERR_TUNING]);
+
+	seq_printf(file, "# CMDQ RED Errors:\t\t %d\n",
+		   host->err_stats[MMC_ERR_CMDQ_RED]);
+
+	seq_printf(file, "# CMDQ GCE Errors:\t\t %d\n",
+		   host->err_stats[MMC_ERR_CMDQ_GCE]);
+
+	seq_printf(file, "# CMDQ ICCE Errors:\t\t %d\n",
+		   host->err_stats[MMC_ERR_CMDQ_ICCE]);
+
+	seq_printf(file, "# Request Timed Out:\t %d\n",
+		   host->err_stats[MMC_ERR_REQ_TIMEOUT]);
+
+	seq_printf(file, "# CMDQ Request Timed Out:\t %d\n",
+		   host->err_stats[MMC_ERR_CMDQ_REQ_TIMEOUT]);
+
+	seq_printf(file, "# ICE Config Errors:\t\t %d\n",
+		   host->err_stats[MMC_ERR_ICE_CFG]);
+	return 0;
+}
+
+static int mmc_err_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mmc_err_stats_show, inode->i_private);
+}
+
+static ssize_t mmc_err_stats_write(struct file *filp, const char __user *ubuf,
+				   size_t cnt, loff_t *ppos)
+{
+	struct mmc_host *host = filp->f_mapping->host->i_private;
+
+	if (!host)
+		return -EINVAL;
+
+	pr_debug("%s: Resetting MMC error statistics\n", __func__);
+	memset(host->err_stats, 0, sizeof(host->err_stats));
+
+	return cnt;
+}
+
+static const struct file_operations mmc_err_stats_fops = {
+	.open	= mmc_err_stats_open,
+	.read	= seq_read,
+	.write	= mmc_err_stats_write,
+	.release = single_release,
+};
+
 void mmc_add_host_debugfs(struct mmc_host *host)
 {
 	struct dentry *root;
@@ -240,16 +459,53 @@
 
 	host->debugfs_root = root;
 
-	if (!debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops))
+	if (!debugfs_create_file("ios", 0400, root, host, &mmc_ios_fops))
 		goto err_node;
 
-	if (!debugfs_create_x32("caps", S_IRUSR, root, &host->caps))
+	if (!debugfs_create_file("max_clock", 0600, root, host,
+		&mmc_max_clock_fops))
 		goto err_node;
 
-	if (!debugfs_create_x32("caps2", S_IRUSR, root, &host->caps2))
+	if (!debugfs_create_file("scale", 0600, root, host,
+		&mmc_scale_fops))
 		goto err_node;
 
-	if (!debugfs_create_file("clock", S_IRUSR | S_IWUSR, root, host,
+	if (!debugfs_create_bool("skip_clk_scale_freq_update",
+		0600, root,
+		&host->clk_scaling.skip_clk_scale_freq_update))
+		goto err_node;
+
+	if (!debugfs_create_bool("crash_on_err",
+		0600, root,
+		&host->crash_on_err))
+		goto err_node;
+
+#ifdef CONFIG_MMC_RING_BUFFER
+	if (!debugfs_create_file("ring_buffer", 0400,
+				root, host, &mmc_ring_buffer_fops))
+		goto err_node;
+#endif
+	if (!debugfs_create_file("err_state", 0600, root, host,
+		&mmc_err_state))
+		goto err_node;
+
+	if (!debugfs_create_file("err_stats", 0600, root, host,
+		&mmc_err_stats_fops))
+		goto err_node;
+
+#ifdef CONFIG_MMC_CLKGATE
+	if (!debugfs_create_u32("clk_delay", 0600,
+				root, &host->clk_delay))
+		goto err_node;
+#endif
+
+	if (!debugfs_create_x32("caps", 0400, root, &host->caps))
+		goto err_node;
+
+	if (!debugfs_create_x32("caps2", 0400, root, &host->caps2))
+		goto err_node;
+
+	if (!debugfs_create_file("clock", 0600, root, host,
 			&mmc_clock_fops))
 		goto err_node;
 
@@ -262,6 +518,10 @@
 					     &host->fail_mmc_request)))
 		goto err_node;
 #endif
+	if (!debugfs_create_file("force_error", 0200, root, host,
+		&mmc_force_err_fops))
+		goto err_node;
+
 	return;
 
 err_node:
@@ -276,6 +536,89 @@
 	debugfs_remove_recursive(host->debugfs_root);
 }
 
+static int mmc_bkops_stats_read(struct seq_file *file, void *data)
+{
+	struct mmc_card *card = file->private;
+	struct mmc_bkops_stats *stats;
+	int i;
+
+	if (!card)
+		return -EINVAL;
+
+	stats = &card->bkops.stats;
+
+	if (!stats->enabled) {
+		pr_info("%s: bkops statistics are disabled\n",
+			 mmc_hostname(card->host));
+		goto exit;
+	}
+
+	spin_lock(&stats->lock);
+
+	seq_printf(file, "%s: bkops statistics:\n",
+			mmc_hostname(card->host));
+	seq_printf(file, "%s: BKOPS: sent START_BKOPS to device: %u\n",
+			mmc_hostname(card->host), stats->manual_start);
+	seq_printf(file, "%s: BKOPS: stopped due to HPI: %u\n",
+			mmc_hostname(card->host), stats->hpi);
+	seq_printf(file, "%s: BKOPS: sent AUTO_EN set to 1: %u\n",
+			mmc_hostname(card->host), stats->auto_start);
+	seq_printf(file, "%s: BKOPS: sent AUTO_EN set to 0: %u\n",
+			mmc_hostname(card->host), stats->auto_stop);
+
+	for (i = 0; i < MMC_BKOPS_NUM_SEVERITY_LEVELS; ++i)
+		seq_printf(file, "%s: BKOPS: due to level %d: %u\n",
+			 mmc_hostname(card->host), i, stats->level[i]);
+
+	spin_unlock(&stats->lock);
+
+exit:
+
+	return 0;
+}
+
+static ssize_t mmc_bkops_stats_write(struct file *filp,
+				      const char __user *ubuf, size_t cnt,
+				      loff_t *ppos)
+{
+	struct mmc_card *card = filp->f_mapping->host->i_private;
+	int value;
+	struct mmc_bkops_stats *stats;
+	int err;
+
+	if (!card)
+		return cnt;
+
+	stats = &card->bkops.stats;
+
+	err = kstrtoint_from_user(ubuf, cnt, 0, &value);
+	if (err) {
+		pr_err("%s: %s: error parsing input from user (%d)\n",
+				mmc_hostname(card->host), __func__, err);
+		return err;
+	}
+	if (value) {
+		mmc_blk_init_bkops_statistics(card);
+	} else {
+		spin_lock(&stats->lock);
+		stats->enabled = false;
+		spin_unlock(&stats->lock);
+	}
+
+	return cnt;
+}
+
+static int mmc_bkops_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mmc_bkops_stats_read, inode->i_private);
+}
+
+static const struct file_operations mmc_dbg_bkops_stats_fops = {
+	.open		= mmc_bkops_stats_open,
+	.read		= seq_read,
+	.write		= mmc_bkops_stats_write,
+	.release	= single_release,
+};
+
 void mmc_add_card_debugfs(struct mmc_card *card)
 {
 	struct mmc_host	*host = card->host;
@@ -295,9 +638,16 @@
 
 	card->debugfs_root = root;
 
-	if (!debugfs_create_x32("state", S_IRUSR, root, &card->state))
+	if (!debugfs_create_x32("state", 0400, root, &card->state))
 		goto err;
 
+	if (mmc_card_mmc(card) && (card->ext_csd.rev >= 5) &&
+	    (mmc_card_configured_auto_bkops(card) ||
+	     mmc_card_configured_manual_bkops(card)))
+		if (!debugfs_create_file("bkops_stats", 0400, root, card,
+					 &mmc_dbg_bkops_stats_fops))
+			goto err;
+
 	return;
 
 err:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index f3dc49f..a8e16b5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -24,6 +24,8 @@
 
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/ring_buffer.h>
+
 #include <linux/mmc/slot-gpio.h>
 
 #include "core.h"
@@ -34,6 +36,10 @@
 
 #define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)
 
+#define MMC_DEVFRQ_DEFAULT_UP_THRESHOLD 35
+#define MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD 5
+#define MMC_DEVFRQ_DEFAULT_POLLING_MSEC 100
+
 static DEFINE_IDA(mmc_host_ida);
 
 static void mmc_host_classdev_release(struct device *dev)
@@ -43,9 +49,28 @@
 	kfree(host);
 }
 
+static int mmc_host_prepare(struct device *dev)
+{
+	/*
+	 * Since mmc_host is a virtual device, we don't have to do anything.
+	 * If we return a positive value, the pm framework will consider that
+	 * the runtime suspend and system suspend of this device are the same
+	 * and will set the direct_complete flag to true. We don't want this
+	 * because the mmc_host always has a positive disable_depth, so setting
+	 * the flag will not speed up the suspend process.
+	 * So return 0.
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops mmc_pm_ops = {
+	.prepare = mmc_host_prepare,
+};
+
 static struct class mmc_host_class = {
 	.name		= "mmc_host",
 	.dev_release	= mmc_host_classdev_release,
+	.pm		= &mmc_pm_ops,
 };
 
 int mmc_register_host_class(void)
@@ -58,6 +83,302 @@
 	class_unregister(&mmc_host_class);
 }
 
+#ifdef CONFIG_MMC_CLKGATE
+static ssize_t clkgate_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
+}
+
+static ssize_t clkgate_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clkgate_delay = value;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	return count;
+}
+
+/*
+ * Enabling clock gating will make the core call out to the host
+ * once up and once down when it performs a request or card operation
+ * intermingled in any fashion. The driver will see this through
+ * set_ios() operations with ios.clock field set to 0 to gate (disable)
+ * the block clock, and to the old frequency to enable it again.
+ */
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+	unsigned long tick_ns;
+	unsigned long freq = host->ios.clock;
+	unsigned long flags;
+
+	if (!freq) {
+		pr_debug("%s: frequency set to 0 in disable function, this means the clock is already disabled.\n",
+			 mmc_hostname(host));
+		return;
+	}
+	/*
+	 * New requests may have appeared while we were scheduling,
+	 * then there is no reason to delay the check before
+	 * clk_disable().
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+
+	/*
+	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
+	 * to disable the MCI block clock. The reference count may have
+	 * gone up again after this delay due to rescheduling!
+	 */
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		tick_ns = DIV_ROUND_UP(1000000000, freq);
+		ndelay(host->clk_delay * tick_ns);
+	} else {
+		/* New users appeared while waiting for this work */
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		return;
+	}
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		/* This will set host->ios.clock to 0 */
+		mmc_gate_clock(host);
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+	}
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+	struct mmc_host *host = container_of(work, struct mmc_host,
+					      clk_gate_work.work);
+
+	mmc_host_clk_gate_delayed(host);
+}
+
+/**
+ *	mmc_host_clk_hold - ungate hardware MCI clocks
+ *	@host: host to ungate.
+ *
+ *	Makes sure the host ios.clock is restored to a non-zero value
+ *	past this call.	Increase clock reference count and ungate clock
+ *	if we're the first user.
+ */
+void mmc_host_clk_hold(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
+	cancel_delayed_work_sync(&host->clk_gate_work);
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		mmc_ungate_clock(host);
+
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+	}
+	host->clk_requests++;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_may_gate_card - check if this card may be gated
+ *	@card: card to check.
+ */
+bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+	/* If there is no card we may gate it */
+	if (!card)
+		return true;
+
+	/*
+	 * An SDIO 3.0 card allows its clock to be gated off, so check
+	 * whether that is the case.
+	 */
+	if (mmc_card_sdio(card) && card->cccr.async_intr_sup)
+		return true;
+
+	/*
+	 * Don't gate SDIO cards! These need to be clocked at all times
+	 * since they may be independent systems generating interrupts
+	 * and other events. The clock requests counter from the core will
+	 * go down to zero since the core does not need it, but we will not
+	 * gate the clock, because there is somebody out there that may still
+	 * be using it.
+	 */
+	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
+}
+
+/**
+ *	mmc_host_clk_release - gate off hardware MCI clocks
+ *	@host: host to gate.
+ *
+ *	Calls the host driver with ios.clock set to zero as often as possible
+ *	in order to gate off hardware MCI clocks. Decrease clock reference
+ *	count and schedule disabling of clock.
+ */
+void mmc_host_clk_release(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_requests--;
+	if (mmc_host_may_gate_card(host->card) &&
+	    !host->clk_requests)
+		queue_delayed_work(host->clk_gate_wq, &host->clk_gate_work,
+				      msecs_to_jiffies(host->clkgate_delay));
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+/**
+ *	mmc_host_clk_rate - get current clock frequency setting
+ *	@host: host to get the clock frequency for.
+ *
+ *	Returns current clock frequency regardless of gating.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	unsigned long freq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated)
+		freq = host->clk_old;
+	else
+		freq = host->ios.clock;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	return freq;
+}
+
+/**
+ *	mmc_host_clk_init - set up clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+	host->clk_requests = 0;
+	/* Hold MCI clock for 8 cycles by default */
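+	/* (at the 400 kHz init clock, 8 cycles is 20 us of hold time) */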
+	host->clk_delay = 8;
+	/*
+	 * Default clock gating delay is 0ms to avoid wasting power.
+	 * This value can be tuned by writing to the sysfs entry.
+	 */
+	host->clkgate_delay = 0;
+	host->clk_gated = false;
+	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+	spin_lock_init(&host->clk_lock);
+	mutex_init(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_clk_exit - shut down clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+	/*
+	 * Wait for any outstanding gate and then make sure we're
+	 * ungated before exiting.
+	 */
+	if (cancel_delayed_work_sync(&host->clk_gate_work))
+		mmc_host_clk_gate_delayed(host);
+	if (host->clk_gated)
+		mmc_host_clk_hold(host);
+	if (host->clk_gate_wq)
+		destroy_workqueue(host->clk_gate_wq);
+	/* There should be only one user now */
+	WARN_ON(host->clk_requests > 1);
+}
+
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+	host->clkgate_delay_attr.show = clkgate_delay_show;
+	host->clkgate_delay_attr.store = clkgate_delay_store;
+	sysfs_attr_init(&host->clkgate_delay_attr.attr);
+	host->clkgate_delay_attr.attr.name = "clkgate_delay";
+	host->clkgate_delay_attr.attr.mode = 0644;
+	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
+		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
+				mmc_hostname(host));
+}
+
+static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host)
+{
+	char *wq = NULL;
+	int wq_nl;
+	bool ret = true;
+
+	wq_nl = sizeof("mmc_clk_gate/") + strlen(mmc_hostname(host)) + 1;
+
+	wq = kzalloc(wq_nl, GFP_KERNEL);
+	if (!wq) {
+		ret = false;
+		goto out;
+	}
+
+	snprintf(wq, wq_nl, "mmc_clk_gate/%s", mmc_hostname(host));
+
+	/*
+	 * Create a workqueue with WQ_MEM_RECLAIM set for the mmc clock
+	 * gate work. Because the mmc thread is created with PF_MEMALLOC
+	 * set, the kernel checks for WQ_MEM_RECLAIM when flushing the
+	 * workqueue and triggers a warning if it is not set.
+	 */
+	host->clk_gate_wq = create_workqueue(wq);
+	if (!host->clk_gate_wq) {
+		ret = false;
+		dev_err(host->parent,
+				"failed to create clock gate work queue\n");
+	}
+
+	kfree(wq);
+out:
+	return ret;
+}
+#else
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+}
+
+bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+	return false;
+}
+
+static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host)
+{
+	return true;
+}
+#endif
+
 void mmc_retune_enable(struct mmc_host *host)
 {
 	host->can_retune = 1;
@@ -65,6 +386,7 @@
 		mod_timer(&host->retune_timer,
 			  jiffies + host->retune_period * HZ);
 }
+EXPORT_SYMBOL(mmc_retune_enable);
 
 /*
  * Pause re-tuning for a small set of operations.  The pause begins after the
@@ -97,6 +419,7 @@
 	host->retune_now = 0;
 	host->need_retune = 0;
 }
+EXPORT_SYMBOL(mmc_retune_disable);
 
 void mmc_retune_timer_stop(struct mmc_host *host)
 {
@@ -390,6 +713,13 @@
 		return NULL;
 	}
 
+	if (!mmc_host_clk_gate_wq_init(host)) {
+		kfree(host);
+		return NULL;
+	}
+
+	mmc_host_clk_init(host);
+
 	spin_lock_init(&host->lock);
 	init_waitqueue_head(&host->wq);
 	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -415,6 +745,214 @@
 
 EXPORT_SYMBOL(mmc_alloc_host);
 
+static ssize_t enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+	if (!host)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", mmc_can_scale_clk(host));
+}
+
+static ssize_t enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long value;
+
+	if (!host || !host->card || kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	mmc_get_card(host->card, NULL);
+
+	if (!value) {
+		/* Suspend the clock scaling and mask host capability */
+		if (host->clk_scaling.enable)
+			mmc_suspend_clk_scaling(host);
+		host->clk_scaling.enable = false;
+		host->caps2 &= ~MMC_CAP2_CLK_SCALE;
+		host->clk_scaling.state = MMC_LOAD_HIGH;
+		/* Set to max. frequency when disabling */
+		mmc_clk_update_freq(host, host->card->clk_scaling_highest,
+					host->clk_scaling.state);
+	} else {
+		/* Unmask host capability and resume scaling */
+		host->caps2 |= MMC_CAP2_CLK_SCALE;
+		if (!host->clk_scaling.enable) {
+			host->clk_scaling.enable = true;
+			mmc_resume_clk_scaling(host);
+		}
+	}
+
+	mmc_put_card(host->card, NULL);
+
+	return count;
+}
+
+static ssize_t up_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+	if (!host)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", host->clk_scaling.upthreshold);
+}
+
+#define MAX_PERCENTAGE	100
+static ssize_t up_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long value;
+
+	if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
+		return -EINVAL;
+
+	host->clk_scaling.upthreshold = value;
+
+	pr_debug("%s: clkscale_up_thresh set to %lu\n",
+			mmc_hostname(host), value);
+	return count;
+}
+
+static ssize_t down_threshold_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+	if (!host)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			host->clk_scaling.downthreshold);
+}
+
+static ssize_t down_threshold_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long value;
+
+	if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
+		return -EINVAL;
+
+	host->clk_scaling.downthreshold = value;
+
+	pr_debug("%s: clkscale_down_thresh set to %lu\n",
+			mmc_hostname(host), value);
+	return count;
+}
+
+static ssize_t polling_interval_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+	if (!host)
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%lu milliseconds\n",
+			host->clk_scaling.polling_delay_ms);
+}
+
+static ssize_t polling_interval_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long value;
+
+	if (!host || kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	host->clk_scaling.polling_delay_ms = value;
+
+	pr_debug("%s: clkscale_polling_delay_ms set to %lu\n",
+			mmc_hostname(host), value);
+	return count;
+}
+
+DEVICE_ATTR_RW(enable);
+DEVICE_ATTR_RW(polling_interval);
+DEVICE_ATTR_RW(up_threshold);
+DEVICE_ATTR_RW(down_threshold);
+
+static struct attribute *clk_scaling_attrs[] = {
+	&dev_attr_enable.attr,
+	&dev_attr_up_threshold.attr,
+	&dev_attr_down_threshold.attr,
+	&dev_attr_polling_interval.attr,
+	NULL,
+};
+
+static struct attribute_group clk_scaling_attr_grp = {
+	.name = "clk_scaling",
+	.attrs = clk_scaling_attrs,
+};
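+
+/*
+ * These attributes appear under the host class device, e.g.
+ * /sys/class/mmc_host/mmc0/clk_scaling/{enable,up_threshold,
+ * down_threshold,polling_interval}.
+ */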
+
+#ifdef CONFIG_MMC_PERF_PROFILING
+static ssize_t
+perf_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	int64_t rtime_drv, wtime_drv;
+	unsigned long rbytes_drv, wbytes_drv, flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	rbytes_drv = host->perf.rbytes_drv;
+	wbytes_drv = host->perf.wbytes_drv;
+
+	rtime_drv = ktime_to_us(host->perf.rtime_drv);
+	wtime_drv = ktime_to_us(host->perf.wtime_drv);
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return snprintf(buf, PAGE_SIZE, "Write performance at driver Level: %lu bytes in %lld microseconds. Read performance at driver Level: %lu bytes in %lld microseconds\n",
+					wbytes_drv, wtime_drv,
+					rbytes_drv, rtime_drv);
+}
+
+static ssize_t
+perf_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	u64 value;
+	unsigned long flags;
+
+	if (kstrtou64(buf, 0, &value) < 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (!value) {
+		memset(&host->perf, 0, sizeof(host->perf));
+		host->perf_enable = false;
+	} else {
+		host->perf_enable = true;
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(perf);
+
+#endif
+
+static struct attribute *dev_attrs[] = {
+#ifdef CONFIG_MMC_PERF_PROFILING
+	&dev_attr_perf.attr,
+#endif
+	NULL,
+};
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
+
 /**
  *	mmc_add_host - initialise host hardware
  *	@host: mmc host
@@ -436,9 +974,26 @@
 
 	led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
 
+	host->clk_scaling.upthreshold = MMC_DEVFRQ_DEFAULT_UP_THRESHOLD;
+	host->clk_scaling.downthreshold = MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD;
+	host->clk_scaling.polling_delay_ms = MMC_DEVFRQ_DEFAULT_POLLING_MSEC;
+	host->clk_scaling.skip_clk_scale_freq_update = false;
+
 #ifdef CONFIG_DEBUG_FS
 	mmc_add_host_debugfs(host);
 #endif
+	mmc_host_clk_sysfs_init(host);
+	mmc_trace_init(host);
+
+	err = sysfs_create_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
+	if (err)
+		pr_err("%s: failed to create clk scale sysfs group with err %d\n",
+				__func__, err);
+
+	err = sysfs_create_group(&host->class_dev.kobj, &dev_attr_grp);
+	if (err)
+		pr_err("%s: failed to create sysfs group with err %d\n",
+							 __func__, err);
 
 	mmc_start_host(host);
 	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
@@ -467,9 +1022,14 @@
 	mmc_remove_host_debugfs(host);
 #endif
 
+	sysfs_remove_group(&host->class_dev.kobj, &dev_attr_grp);
+	sysfs_remove_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
+
 	device_del(&host->class_dev);
 
 	led_trigger_unregister_simple(host->led);
+
+	mmc_host_clk_exit(host);
 }
 
 EXPORT_SYMBOL(mmc_remove_host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bc1bd2c..00cdf6d 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -19,6 +19,8 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/mmc.h>
+#include <linux/reboot.h>
+#include <trace/events/mmc.h>
 
 #include "core.h"
 #include "card.h"
@@ -336,7 +338,7 @@
 				continue;
 			if (card->ext_csd.partition_setting_completed == 0) {
 				pr_warn("%s: has partition size defined without partition complete\n",
-					mmc_hostname(card->host));
+				mmc_hostname(card->host));
 				break;
 			}
 			part_size =
@@ -520,6 +522,25 @@
 			ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
 	}
 
+	/* check whether the eMMC card supports HPI */
+	if ((ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) &&
+		!(card->quirks & MMC_QUIRK_BROKEN_HPI)) {
+		card->ext_csd.hpi = 1;
+		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
+			card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
+		else
+			card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
+		/*
+		 * Indicate the maximum timeout to close
+		 * a command interrupted by HPI
+		 */
+		card->ext_csd.out_of_int_time =
+			ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
+		pr_info("%s: Out-of-interrupt timeout is %d[ms]\n",
+				mmc_hostname(card->host),
+				card->ext_csd.out_of_int_time);
+	}
+
 	if (card->ext_csd.rev >= 5) {
 		/* Adjust production date as per JEDEC JESD84-B451 */
 		if (card->cid.year < 2010)
@@ -527,7 +548,8 @@
 
 		/* check whether the eMMC card supports BKOPS */
 		if (!mmc_card_broken_hpi(card) &&
-		    ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+		    (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) &&
+				card->ext_csd.hpi) {
 			card->ext_csd.bkops = 1;
 			card->ext_csd.man_bkops_en =
 					(ext_csd[EXT_CSD_BKOPS_EN] &
@@ -565,6 +587,19 @@
 		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
 
 		/*
+		 * Some eMMC vendors violate the eMMC 5.0 spec and set the
+		 * REL_WR_SEC_C register to 0x10 to indicate improved RPMB
+		 * throughput, which makes writes from the TZ module to the
+		 * RPMB partition fail. So check bit[4] of EXT_CSD[166] and,
+		 * if it is not set, force REL_WR_SEC_C to 0x1, ignoring the
+		 * value of EXT_CSD[222].
+		 */
+		if (!(card->ext_csd.rel_param &
+					EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR))
+			card->ext_csd.rel_sectors = 0x1;
+
+		/*
 		 * RPMB regions are defined in multiples of 128K.
 		 */
 		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
@@ -658,6 +693,7 @@
 
 static int mmc_read_ext_csd(struct mmc_card *card)
 {
+	struct mmc_host *host = card->host;
 	u8 *ext_csd;
 	int err;
 
@@ -666,6 +702,9 @@
 
 	err = mmc_get_ext_csd(card, &ext_csd);
 	if (err) {
+		pr_err("%s: %s: mmc_get_ext_csd() fails %d\n",
+				mmc_hostname(host), __func__, err);
+
 		/* If the host or the card can't do the switch,
 		 * fail more gracefully. */
 		if ((err != -EINVAL)
@@ -985,16 +1024,16 @@
  */
 static int mmc_select_bus_width(struct mmc_card *card)
 {
-	static unsigned ext_csd_bits[] = {
+	static const unsigned int ext_csd_bits[] = {
 		EXT_CSD_BUS_WIDTH_8,
 		EXT_CSD_BUS_WIDTH_4,
 	};
-	static unsigned bus_widths[] = {
+	static const unsigned int bus_widths[] = {
 		MMC_BUS_WIDTH_8,
 		MMC_BUS_WIDTH_4,
 	};
 	struct mmc_host *host = card->host;
-	unsigned idx, bus_width = 0;
+	unsigned int idx, bus_width = 0;
 	int err = 0;
 
 	if (!mmc_can_ext_csd(card) ||
@@ -1150,9 +1189,28 @@
 	/*
 	 * HS400 mode requires 8-bit bus width
 	 */
-	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
-	      host->ios.bus_width == MMC_BUS_WIDTH_8))
-		return 0;
+	if (card->ext_csd.strobe_support) {
+		if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+		    host->caps & MMC_CAP_8_BIT_DATA))
+			return 0;
+
+		/*
+		 * For the Enhanced Strobe flow. For non-Enhanced Strobe,
+		 * the signal voltage will not be set.
+		 */
+		if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
+			err = mmc_set_signal_voltage(host,
+					MMC_SIGNAL_VOLTAGE_120);
+
+		if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
+			err = mmc_set_signal_voltage(host,
+					MMC_SIGNAL_VOLTAGE_180);
+		if (err)
+			return err;
+	} else {
+		if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+		    host->ios.bus_width == MMC_BUS_WIDTH_8))
+			return 0;
+	}
 
 	/* Switch card to HS mode */
 	val = EXT_CSD_TIMING_HS;
@@ -1181,10 +1239,18 @@
 	if (err)
 		goto out_err;
 
+	val = EXT_CSD_DDR_BUS_WIDTH_8;
+	if (card->ext_csd.strobe_support) {
+		err = mmc_select_bus_width(card);
+		if (IS_ERR_VALUE((unsigned long)err))
+			return err;
+		val |= EXT_CSD_BUS_WIDTH_STROBE;
+	}
+
 	/* Switch card to DDR */
 	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 			 EXT_CSD_BUS_WIDTH,
-			 EXT_CSD_DDR_BUS_WIDTH_8,
+			 val,
 			 card->ext_csd.generic_cmd6_time);
 	if (err) {
 		pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
@@ -1209,6 +1275,29 @@
 	mmc_set_timing(host, MMC_TIMING_MMC_HS400);
 	mmc_set_bus_speed(card);
 
+	if (card->ext_csd.strobe_support && host->ops->enhanced_strobe) {
+		mmc_host_clk_hold(host);
+		err = host->ops->enhanced_strobe(host);
+		if (!err)
+			host->ios.enhanced_strobe = true;
+		mmc_host_clk_release(host);
+	} else if ((host->caps2 & MMC_CAP2_HS400_POST_TUNING) &&
+			host->ops->execute_tuning) {
+		mmc_host_clk_hold(host);
+		err = host->ops->execute_tuning(host,
+				MMC_SEND_TUNING_BLOCK_HS200);
+		mmc_host_clk_release(host);
+
+		if (err)
+			pr_warn("%s: tuning execution failed\n",
+				mmc_hostname(host));
+	}
+
+	/*
+	 * CMD13 should be sent only after the host calibration for
+	 * enhanced_strobe or HS400 mode has completed; otherwise we may
+	 * see CMD13 timeouts or CRC errors.
+	 */
 	err = mmc_switch_status(card);
 	if (err)
 		goto out_err;
@@ -1236,10 +1325,6 @@
 	int err;
 	u8 val;
 
-	/* Reduce frequency to HS */
-	max_dtr = card->ext_csd.hs_max_dtr;
-	mmc_set_clock(host, max_dtr);
-
 	/* Switch HS400 to HS DDR */
 	val = EXT_CSD_TIMING_HS;
 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
@@ -1250,6 +1335,10 @@
 
 	mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
 
+	/* Reduce frequency to HS */
+	max_dtr = card->ext_csd.hs_max_dtr;
+	mmc_set_clock(host, max_dtr);
+
 	err = mmc_switch_status(card);
 	if (err)
 		goto out_err;
@@ -1490,6 +1579,17 @@
 	return err;
 }
 
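+/*
+ * Record which power-off notification to send before the card loses
+ * power: SYS_RESTART gets the short PON, power off/halt get the long one.
+ */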
+static int mmc_reboot_notify(struct notifier_block *notify_block,
+		unsigned long event, void *unused)
+{
+	struct mmc_card *card = container_of(
+			notify_block, struct mmc_card, reboot_notify);
+
+	card->pon_type = (event != SYS_RESTART) ? MMC_LONG_PON : MMC_SHRT_PON;
+
+	return NOTIFY_OK;
+}
+
 /*
  * Activate High Speed, HS200 or HS400ES mode if supported.
  */
@@ -1500,12 +1600,22 @@
 	if (!mmc_can_ext_csd(card))
 		goto bus_speed;
 
-	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
-		err = mmc_select_hs400es(card);
-	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
+	/* For Enhanced Strobe HS400 flow */
+	if (card->ext_csd.strobe_support &&
+	    card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+	    card->host->caps & MMC_CAP_8_BIT_DATA) {
+		err = mmc_select_hs400(card);
+		if (err) {
+			pr_err("%s: %s: mmc_select_hs400 failed : %d\n",
+					mmc_hostname(card->host), __func__,
+					err);
+			err = mmc_select_hs400es(card);
+		}
+	} else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) {
 		err = mmc_select_hs200(card);
-	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
+	} else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS) {
 		err = mmc_select_hs(card);
+	}
 
 	if (err && err != -EBADMSG)
 		return err;
@@ -1533,12 +1643,203 @@
 	 */
 	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
 	    host->ios.bus_width == MMC_BUS_WIDTH_8)
-		if (host->ops->prepare_hs400_tuning)
-			host->ops->prepare_hs400_tuning(host, &host->ios);
+		mmc_set_timing(host, MMC_TIMING_MMC_HS400);
 
 	return mmc_execute_tuning(card);
 }
 
+static int mmc_select_hs_ddr52(struct mmc_host *host)
+{
+	int err;
+
+	mmc_select_hs(host->card);
+	err = mmc_select_bus_width(host->card);
+	if (err < 0) {
+		pr_err("%s: %s: select_bus_width failed (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	err = mmc_select_hs_ddr(host->card);
+	mmc_set_clock(host, MMC_HIGH_52_MAX_DTR);
+
+	return err;
+}
+
+/*
+ * Scale down from HS400 to HS in order to allow a frequency change.
+ * This is needed for cards that don't support changing the frequency in HS400.
+ */
+static int mmc_scale_low(struct mmc_host *host, unsigned long freq)
+{
+	int err = 0;
+
+	mmc_set_timing(host, MMC_TIMING_LEGACY);
+	mmc_set_clock(host, MMC_HIGH_26_MAX_DTR);
+
+	if (host->clk_scaling.lower_bus_speed_mode &
+	    MMC_SCALING_LOWER_DDR52_MODE) {
+		err = mmc_select_hs_ddr52(host);
+		if (err)
+			pr_err("%s: %s: failed to switch to DDR52: err: %d\n",
+			       mmc_hostname(host), __func__, err);
+		else
+			return err;
+	}
+
+	err = mmc_select_hs(host->card);
+	if (err) {
+		pr_err("%s: %s: scaling low: failed (%d)\n",
+		       mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	err = mmc_select_bus_width(host->card);
+	if (err < 0) {
+		pr_err("%s: %s: select_bus_width failed (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	mmc_set_clock(host, freq);
+
+	return 0;
+}
+
+/*
+ * Scale up from HS to HS200/HS400
+ */
+static int mmc_scale_high(struct mmc_host *host)
+{
+	int err = 0;
+
+	if (mmc_card_ddr52(host->card)) {
+		mmc_set_timing(host, MMC_TIMING_LEGACY);
+		mmc_set_clock(host, MMC_HIGH_26_MAX_DTR);
+	}
+
+	if (!host->card->ext_csd.strobe_support) {
+		if (!(host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)) {
+			pr_err("%s: %s: card does not support HS200\n",
+				mmc_hostname(host), __func__);
+			WARN_ON(1);
+			return -EPERM;
+		}
+
+		err = mmc_select_hs200(host->card);
+		if (err) {
+			pr_err("%s: %s: selecting HS200 failed (%d)\n",
+				mmc_hostname(host), __func__, err);
+			return err;
+		}
+
+		mmc_set_bus_speed(host->card);
+
+		err = mmc_hs200_tuning(host->card);
+		if (err) {
+			pr_err("%s: %s: hs200 tuning failed (%d)\n",
+				mmc_hostname(host), __func__, err);
+			return err;
+		}
+
+		if (!(host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400)) {
+			pr_debug("%s: card does not support HS400\n",
+				mmc_hostname(host));
+			return 0;
+		}
+	}
+
+	err = mmc_select_hs400(host->card);
+	if (err) {
+		pr_err("%s: %s: select hs400 failed (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	return err;
+}
+
+static int mmc_set_clock_bus_speed(struct mmc_card *card, unsigned long freq)
+{
+	int err = 0;
+
+	if (freq == MMC_HS200_MAX_DTR)
+		err = mmc_scale_high(card->host);
+	else
+		err = mmc_scale_low(card->host, freq);
+
+	return err;
+}
+
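+/*
+ * DDR52 clocks data on both edges, so a requested data rate maps to half
+ * the bus clock, except at the DDR52 maximum which is already a bus rate.
+ */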
+static inline unsigned long mmc_ddr_freq_accommodation(unsigned long freq)
+{
+	if (freq == MMC_HIGH_DDR_MAX_DTR)
+		return freq;
+
+	return freq/2;
+}
+
+/**
+ * mmc_change_bus_speed() - Change MMC card bus frequency at runtime
+ * @host: pointer to mmc host structure
+ * @freq: pointer to desired frequency to be set
+ *
+ * Change the MMC card bus frequency at runtime after the card is
+ * initialized. Callers are expected to make sure of the card's
+ * state (DATA/RCV/TRANSFER) before changing the frequency at runtime.
+ *
+ * If the requested frequency is greater than the maximum supported by the
+ * card, *freq is changed to the card's maximum. If it is less than the
+ * minimum supported by the host, *freq is changed to the host's minimum.
+ * The host is assumed to be claimed while calling this function.
+ */
+static int mmc_change_bus_speed(struct mmc_host *host, unsigned long *freq)
+{
+	int err = 0;
+	struct mmc_card *card;
+	unsigned long actual_freq;
+
+	card = host->card;
+
+	if (!card || !freq) {
+		err = -EINVAL;
+		goto out;
+	}
+	actual_freq = *freq;
+
+	WARN_ON(!host->claimed);
+
+	/*
+	 * For scaling up/down HS400 we'll need special handling,
+	 * for other timings we can simply do clock frequency change
+	 */
+	if (mmc_card_hs400(card) ||
+		(!mmc_card_hs200(host->card) && *freq == MMC_HS200_MAX_DTR)) {
+		err = mmc_set_clock_bus_speed(card, *freq);
+		if (err) {
+			pr_err("%s: %s: failed (%d) to set bus and clock speed (freq=%lu)\n",
+				mmc_hostname(host), __func__, err, *freq);
+			goto out;
+		}
+	} else if (mmc_card_hs200(host->card)) {
+		mmc_set_clock(host, *freq);
+		err = mmc_hs200_tuning(host->card);
+		if (err) {
+			pr_warn("%s: %s: tuning execution failed %d\n",
+				mmc_hostname(card->host),
+				__func__, err);
+			mmc_set_clock(host, host->clk_scaling.curr_freq);
+		}
+	} else {
+		if (mmc_card_ddr52(host->card))
+			actual_freq = mmc_ddr_freq_accommodation(*freq);
+		mmc_set_clock(host, actual_freq);
+	}
+
+out:
+	return err;
+}
+
 /*
  * Handle the detection and initialisation of a card.
  *
@@ -1570,28 +1871,39 @@
 
 	/* The extra bit indicates that we support high capacity */
 	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
-	if (err)
+	if (err) {
+		pr_err("%s: %s: mmc_send_op_cond() fails %d\n",
+				mmc_hostname(host), __func__, err);
 		goto err;
+	}
 
 	/*
 	 * For SPI, enable CRC as appropriate.
 	 */
 	if (mmc_host_is_spi(host)) {
 		err = mmc_spi_set_crc(host, use_spi_crc);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_spi_set_crc() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto err;
+		}
 	}
 
 	/*
 	 * Fetch CID from card.
 	 */
 	err = mmc_send_cid(host, cid);
-	if (err)
+	if (err) {
+		pr_err("%s: %s: mmc_send_cid() fails %d\n",
+				mmc_hostname(host), __func__, err);
 		goto err;
+	}
 
 	if (oldcard) {
 		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
 			err = -ENOENT;
+			pr_err("%s: %s: CID memcmp failed %d\n",
+					mmc_hostname(host), __func__, err);
 			goto err;
 		}
 
@@ -1603,6 +1915,8 @@
 		card = mmc_alloc_card(host, &mmc_type);
 		if (IS_ERR(card)) {
 			err = PTR_ERR(card);
+			pr_err("%s: %s: no memory to allocate for card %d\n",
+					mmc_hostname(host), __func__, err);
 			goto err;
 		}
 
@@ -1610,6 +1924,8 @@
 		card->type = MMC_TYPE_MMC;
 		card->rca = 1;
 		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
+		host->card = card;
+		card->reboot_notify.notifier_call = mmc_reboot_notify;
 	}
 
 	/*
@@ -1623,8 +1939,11 @@
 	 */
 	if (!mmc_host_is_spi(host)) {
 		err = mmc_set_relative_addr(card);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_set_relative_addr() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 
 		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
 	}
@@ -1634,15 +1953,24 @@
 		 * Fetch CSD from card.
 		 */
 		err = mmc_send_csd(card, card->raw_csd);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_send_csd() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 
 		err = mmc_decode_csd(card);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_decode_csd() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 		err = mmc_decode_cid(card);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_decode_cid() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 	}
 
 	/*
@@ -1657,15 +1985,21 @@
 	 */
 	if (!mmc_host_is_spi(host)) {
 		err = mmc_select_card(card);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_select_card() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 	}
 
 	if (!oldcard) {
 		/* Read extended CSD. */
 		err = mmc_read_ext_csd(card);
-		if (err)
+		if (err) {
+			pr_err("%s: %s: mmc_read_ext_csd() fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 
 		/*
 		 * If doing byte addressing, check if required to do sector
@@ -1678,6 +2012,9 @@
 
 		/* Erase size depends on CSD and Extended CSD */
 		mmc_set_erase_size(card);
+
+		if (card->ext_csd.sectors && (rocr & MMC_CARD_SECTOR_ADDR))
+			mmc_card_set_blockaddr(card);
 	}
 
 	/* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */
@@ -1686,8 +2023,11 @@
 				 EXT_CSD_ERASE_GROUP_DEF, 1,
 				 card->ext_csd.generic_cmd6_time);
 
-		if (err && err != -EBADMSG)
+		if (err && err != -EBADMSG) {
+			pr_err("%s: %s: mmc_switch() for ERASE_GRP_DEF fails %d\n",
+				mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 
 		if (err) {
 			err = 0;
@@ -1717,8 +2057,13 @@
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
 				 card->ext_csd.part_config,
 				 card->ext_csd.part_time);
-		if (err && err != -EBADMSG)
+		if (err && err != -EBADMSG) {
+			pr_err("%s: %s: mmc_switch() for PART_CONFIG fails %d\n",
+				mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
+		card->part_curr = card->ext_csd.part_config &
+				  EXT_CSD_PART_CONFIG_ACC_MASK;
 	}
 
 	/*
@@ -1729,8 +2074,11 @@
 				 EXT_CSD_POWER_OFF_NOTIFICATION,
 				 EXT_CSD_POWER_ON,
 				 card->ext_csd.generic_cmd6_time);
-		if (err && err != -EBADMSG)
+		if (err && err != -EBADMSG) {
+			pr_err("%s: %s: mmc_switch() for POWER_ON PON fails %d\n",
+				mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 
 		/*
 		 * The err can be -EBADMSG or 0,
@@ -1744,8 +2092,11 @@
 	 * Select timing interface
 	 */
 	err = mmc_select_timing(card);
-	if (err)
+	if (err) {
+		pr_err("%s: %s: mmc_select_timing() fails %d\n",
+					mmc_hostname(host), __func__, err);
 		goto free_card;
+	}
 
 	if (mmc_card_hs200(card)) {
 		err = mmc_hs200_tuning(card);
@@ -1765,6 +2116,16 @@
 		}
 	}
 
+	card->clk_scaling_lowest = host->f_min;
+	if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400) ||
+			(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200))
+		card->clk_scaling_highest = card->ext_csd.hs200_max_dtr;
+	else if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS) ||
+			(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
+		card->clk_scaling_highest = card->ext_csd.hs_max_dtr;
+	else
+		card->clk_scaling_highest = card->csd.max_dtr;
+
 	/*
 	 * Choose the power class with selected bus interface
 	 */
@@ -1777,8 +2138,11 @@
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				EXT_CSD_HPI_MGMT, 1,
 				card->ext_csd.generic_cmd6_time);
-		if (err && err != -EBADMSG)
+		if (err && err != -EBADMSG) {
+			pr_err("%s: %s: mmc_switch() for HPI_MGMT fails %d\n",
+					mmc_hostname(host), __func__, err);
 			goto free_card;
+		}
 		if (err) {
 			pr_warn("%s: Enabling HPI failed\n",
 				mmc_hostname(card->host));
@@ -1790,25 +2154,48 @@
 	/*
 	 * If cache size is higher than 0, this indicates
 	 * the existence of cache and it can be turned on.
+	 * If HPI is not supported then cache shouldn't be enabled.
 	 */
-	if (!mmc_card_broken_hpi(card) &&
-	    card->ext_csd.cache_size > 0) {
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				EXT_CSD_CACHE_CTRL, 1,
-				card->ext_csd.generic_cmd6_time);
-		if (err && err != -EBADMSG)
-			goto free_card;
+	if (!mmc_card_broken_hpi(card) && card->ext_csd.cache_size > 0) {
+		if (card->ext_csd.hpi_en &&
+			(!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					EXT_CSD_CACHE_CTRL, 1,
+					card->ext_csd.generic_cmd6_time);
+			if (err && err != -EBADMSG) {
+				pr_err("%s: %s: fail on CACHE_CTRL ON %d\n",
+					mmc_hostname(host), __func__, err);
+				goto free_card;
+			}
 
-		/*
-		 * Only if no error, cache is turned on successfully.
-		 */
-		if (err) {
-			pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
-				mmc_hostname(card->host), err);
-			card->ext_csd.cache_ctrl = 0;
-			err = 0;
+			/*
+			 * Only if no error, cache is turned on successfully.
+			 */
+			if (err) {
+				pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
+					mmc_hostname(card->host), err);
+				card->ext_csd.cache_ctrl = 0;
+				err = 0;
+			} else {
+				card->ext_csd.cache_ctrl = 1;
+			}
 		} else {
-			card->ext_csd.cache_ctrl = 1;
+			/*
+			 * The eMMC standard doesn't define the card's default
+			 * value for EXT_CSD_CACHE_CTRL, so vendors may ship
+			 * with the cache enabled by default. It is therefore
+			 * best to explicitly disable the cache when we want
+			 * to avoid it.
+			 */
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					EXT_CSD_CACHE_CTRL, 0,
+					card->ext_csd.generic_cmd6_time);
+			if (err) {
+				pr_err("%s: %s: fail on CACHE_CTRL OFF %d\n",
+					mmc_hostname(host), __func__, err);
+				goto free_card;
+			}
 		}
 	}
 
@@ -1859,37 +2246,90 @@
 	if (!oldcard)
 		host->card = card;
 
+	/*
+	 * Start auto bkops, if supported.
+	 *
+	 * Note: This leaves the possibility of having both manual and
+	 * auto bkops running in parallel. The runtime implementation
+	 * will allow this, but ignore bkops exceptions on the premise
+	 * that auto bkops will eventually kick in and the device will
+	 * handle bkops without START_BKOPS from the host.
+	 */
+	if (mmc_card_support_auto_bkops(card)) {
+		/*
+		 * Ignore the return value of setting auto bkops.
+		 * If it failed, will run in backward compatible mode.
+		 */
+		(void)mmc_set_auto_bkops(card, true);
+	}
+
 	return 0;
 
 free_card:
-	if (!oldcard)
+	if (!oldcard) {
+		host->card = NULL;
 		mmc_remove_card(card);
+	}
 err:
 	return err;
 }
 
-static int mmc_can_sleep(struct mmc_card *card)
+static int mmc_can_sleepawake(struct mmc_host *host)
 {
-	return (card && card->ext_csd.rev >= 3);
+	return host && (host->caps2 & MMC_CAP2_SLEEP_AWAKE) &&
+			host->card && (host->card->ext_csd.rev >= 3);
 }
 
-static int mmc_sleep(struct mmc_host *host)
+static int mmc_sleepawake(struct mmc_host *host, bool sleep)
 {
 	struct mmc_command cmd = {};
 	struct mmc_card *card = host->card;
-	unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+	unsigned int timeout_ms;
 	int err;
 
+	if (!card) {
+		pr_err("%s: %s: invalid card\n", mmc_hostname(host), __func__);
+		return -EINVAL;
+	}
+
+	timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+	if (card->ext_csd.rev >= 3 &&
+		card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+		u8 part_config = card->ext_csd.part_config;
+
+		/*
+		 * If the last access before suspend is RPMB access, then
+		 * switch to default part config so that sleep command CMD5
+		 * and deselect CMD7 can be sent to the card.
+		 */
+		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_PART_CONFIG,
+				 part_config,
+				 card->ext_csd.part_time);
+		if (err) {
+			pr_err("%s: %s: failed to switch to default part config %x\n",
+				mmc_hostname(host), __func__, part_config);
+			return err;
+		}
+		card->ext_csd.part_config = part_config;
+		card->part_curr = card->ext_csd.part_config &
+				  EXT_CSD_PART_CONFIG_ACC_MASK;
+	}
+
 	/* Re-tuning can't be done once the card is deselected */
 	mmc_retune_hold(host);
 
-	err = mmc_deselect_cards(host);
-	if (err)
-		goto out_release;
+	if (sleep) {
+		err = mmc_deselect_cards(host);
+		if (err)
+			goto out_release;
+	}
 
 	cmd.opcode = MMC_SLEEP_AWAKE;
 	cmd.arg = card->rca << 16;
-	cmd.arg |= 1 << 15;
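+	/* bit 15 of the CMD5 argument selects Sleep (1) vs. Awake (0) */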
+	if (sleep)
+		cmd.arg |= 1 << 15;
 
 	/*
 	 * If the max_busy_timeout of the host is specified, validate it against
@@ -1917,6 +2357,9 @@
 	if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
 		mmc_delay(timeout_ms);
 
+	if (!sleep)
+		err = mmc_select_card(card);
+
 out_release:
 	mmc_retune_release(host);
 	return err;
@@ -1951,13 +2394,40 @@
 	return err;
 }
 
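+/*
+ * Send the power-off notification recorded by the reboot notifier so the
+ * card can flush internal state before power is removed.
+ */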
+int mmc_send_pon(struct mmc_card *card)
+{
+	int err = 0;
+	struct mmc_host *host = card->host;
+
+	if (!mmc_can_poweroff_notify(card))
+		goto out;
+
+	mmc_get_card(card, NULL);
+	if (card->pon_type & MMC_LONG_PON)
+		err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_LONG);
+	else if (card->pon_type & MMC_SHRT_PON)
+		err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
+	if (err)
+		pr_warn("%s: error %d sending PON type %u\n",
+			mmc_hostname(host), err, card->pon_type);
+	mmc_put_card(card, NULL);
+out:
+	return err;
+}
+
 /*
  * Host is being removed. Free up the current card.
  */
 static void mmc_remove(struct mmc_host *host)
 {
+	unregister_reboot_notifier(&host->card->reboot_notify);
+
+	mmc_exit_clk_scaling(host);
 	mmc_remove_card(host->card);
+
+	mmc_claim_host(host);
 	host->card = NULL;
+	mmc_release_host(host);
 }
 
 /*
@@ -1994,11 +2464,79 @@
 	}
 }
 
+static int mmc_cache_card_ext_csd(struct mmc_host *host)
+{
+	int err;
+	u8 *ext_csd;
+	struct mmc_card *card = host->card;
+
+	err = mmc_get_ext_csd(card, &ext_csd);
+	if (err || !ext_csd) {
+		pr_err("%s: %s: mmc_get_ext_csd failed (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	/* only cache read/write fields that the sw changes */
+	card->ext_csd.raw_ext_csd_cmdq = ext_csd[EXT_CSD_CMDQ_MODE_EN];
+	card->ext_csd.raw_ext_csd_cache_ctrl = ext_csd[EXT_CSD_CACHE_CTRL];
+	card->ext_csd.raw_ext_csd_bus_width = ext_csd[EXT_CSD_BUS_WIDTH];
+	card->ext_csd.raw_ext_csd_hs_timing = ext_csd[EXT_CSD_HS_TIMING];
+
+	kfree(ext_csd);
+
+	return 0;
+}
+
+static int mmc_test_awake_ext_csd(struct mmc_host *host)
+{
+	int err;
+	u8 *ext_csd;
+	struct mmc_card *card = host->card;
+
+	err = mmc_get_ext_csd(card, &ext_csd);
+	if (err || !ext_csd) {
+		pr_err("%s: %s: mmc_get_ext_csd failed (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
+	/* only compare read/write fields that the sw changes */
+	pr_debug("%s: %s: type(cached:current) cmdq(%d:%d) cache_ctrl(%d:%d) bus_width (%d:%d) timing(%d:%d)\n",
+		mmc_hostname(host), __func__,
+		card->ext_csd.raw_ext_csd_cmdq,
+		ext_csd[EXT_CSD_CMDQ_MODE_EN],
+		card->ext_csd.raw_ext_csd_cache_ctrl,
+		ext_csd[EXT_CSD_CACHE_CTRL],
+		card->ext_csd.raw_ext_csd_bus_width,
+		ext_csd[EXT_CSD_BUS_WIDTH],
+		card->ext_csd.raw_ext_csd_hs_timing,
+		ext_csd[EXT_CSD_HS_TIMING]);
+
+	err = !((card->ext_csd.raw_ext_csd_cmdq ==
+			ext_csd[EXT_CSD_CMDQ_MODE_EN]) &&
+		(card->ext_csd.raw_ext_csd_cache_ctrl ==
+			ext_csd[EXT_CSD_CACHE_CTRL]) &&
+		(card->ext_csd.raw_ext_csd_bus_width ==
+			ext_csd[EXT_CSD_BUS_WIDTH]) &&
+		(card->ext_csd.raw_ext_csd_hs_timing ==
+			ext_csd[EXT_CSD_HS_TIMING]));
+
+	kfree(ext_csd);
+
+	return err;
+}
+
 static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
 {
 	int err = 0;
-	unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
-					EXT_CSD_POWER_OFF_LONG;
+
+	err = mmc_suspend_clk_scaling(host);
+	if (err) {
+		pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
 
 	mmc_claim_host(host);
 
@@ -2015,20 +2553,90 @@
 	if (err)
 		goto out;
 
-	if (mmc_can_poweroff_notify(host->card) &&
-		((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
-		err = mmc_poweroff_notify(host->card, notify_type);
-	else if (mmc_can_sleep(host->card))
-		err = mmc_sleep(host);
-	else if (!mmc_host_is_spi(host))
+	if (mmc_can_sleepawake(host)) {
+		/*
+		 * Before caching host->ios into cached_ios, make sure
+		 * the clocks are not gated; otherwise cached_ios.clock
+		 * would be recorded as 0.
+		 */
+		mmc_host_clk_hold(host);
+		memcpy(&host->cached_ios, &host->ios,
+			sizeof(host->cached_ios));
+		mmc_cache_card_ext_csd(host);
+		err = mmc_sleepawake(host, true);
+		mmc_host_clk_release(host);
+	} else if (!mmc_host_is_spi(host)) {
 		err = mmc_deselect_cards(host);
-
-	if (!err) {
-		mmc_power_off(host);
-		mmc_card_set_suspended(host->card);
 	}
+
+	if (err)
+		goto out;
+
+	mmc_power_off(host);
+	mmc_card_set_suspended(host->card);
 out:
 	mmc_release_host(host);
+	if (err)
+		mmc_resume_clk_scaling(host);
+	return err;
+}
+
+static int mmc_partial_init(struct mmc_host *host)
+{
+	int err = 0;
+	struct mmc_card *card = host->card;
+
+	pr_debug("%s: %s: starting partial init\n",
+		mmc_hostname(host), __func__);
+
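+	/* Restore the host ios settings that were cached at suspend time */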
+	mmc_set_bus_width(host, host->cached_ios.bus_width);
+	mmc_set_timing(host, host->cached_ios.timing);
+	mmc_set_clock(host, host->cached_ios.clock);
+	mmc_set_bus_mode(host, host->cached_ios.bus_mode);
+
+	mmc_host_clk_hold(host);
+
+	if (mmc_card_hs400(card)) {
+		if (card->ext_csd.strobe_support && host->ops->enhanced_strobe)
+			err = host->ops->enhanced_strobe(host);
+		else if (host->ops->execute_tuning)
+			err = host->ops->execute_tuning(host,
+				MMC_SEND_TUNING_BLOCK_HS200);
+	} else if (mmc_card_hs200(card) && host->ops->execute_tuning) {
+		err = host->ops->execute_tuning(host,
+			MMC_SEND_TUNING_BLOCK_HS200);
+		if (err)
+			pr_warn("%s: %s: tuning execution failed (%d)\n",
+				mmc_hostname(host), __func__, err);
+	}
+
+	/*
+	 * The ext_csd is read to make sure the card did not go through a
+	 * power failure during the sleep period.
+	 * A subset of the W/E_P and W/C_P registers is tested. If these
+	 * register values differ from the values that were cached during
+	 * suspend, we conclude that a power failure occurred and run the
+	 * full initialization sequence.
+	 * In addition, the full init sequence also transfers the ext_csd
+	 * before moving to CMDQ mode, which has the side effect of
+	 * configuring SDHCI registers that must be set up before entering
+	 * CMDQ mode. The same registers need to be configured for partial
+	 * init.
+	 */
+	err = mmc_test_awake_ext_csd(host);
+	if (err) {
+		pr_debug("%s: %s: fail on ext_csd read (%d)\n",
+			mmc_hostname(host), __func__, err);
+		goto out;
+	}
+	pr_debug("%s: %s: reading and comparing ext_csd successful\n",
+		mmc_hostname(host), __func__);
+
+out:
+	mmc_host_clk_release(host);
+
+	pr_debug("%s: %s: done partial init (%d)\n",
+		mmc_hostname(host), __func__, err);
+
 	return err;
 }
 
@@ -2038,13 +2646,18 @@
 static int mmc_suspend(struct mmc_host *host)
 {
 	int err;
+	ktime_t start = ktime_get();
 
+	MMC_TRACE(host, "%s: Enter\n", __func__);
 	err = _mmc_suspend(host, true);
 	if (!err) {
 		pm_runtime_disable(&host->card->dev);
 		pm_runtime_set_suspended(&host->card->dev);
 	}
 
+	trace_mmc_suspend(mmc_hostname(host), err,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
+	MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
 	return err;
 }
 
@@ -2055,18 +2668,53 @@
 static int _mmc_resume(struct mmc_host *host)
 {
 	int err = 0;
+	int retries;
 
 	mmc_claim_host(host);
 
-	if (!mmc_card_suspended(host->card))
+	if (!mmc_card_suspended(host->card)) {
+		mmc_release_host(host);
 		goto out;
+	}
 
 	mmc_power_up(host, host->card->ocr);
-	err = mmc_init_card(host, host->card->ocr, host->card);
+	retries = 3;
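+	/*
+	 * Try the sleep-awake path with partial init first; on failure,
+	 * power cycle the card and fall back to a full re-init.
+	 */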
+	while (retries) {
+		if (mmc_can_sleepawake(host)) {
+			err = mmc_sleepawake(host, false);
+			if (!err)
+				err = mmc_partial_init(host);
+			if (err)
+				pr_err("%s: %s: awake failed (%d), falling back to full init\n",
+					mmc_hostname(host), __func__, err);
+		}
+
+		if (err)
+			err = mmc_init_card(host, host->card->ocr, host->card);
+
+		if (err) {
+			pr_err("%s: MMC card re-init failed rc = %d (retries = %d)\n",
+				mmc_hostname(host), err, retries);
+			retries--;
+			mmc_power_off(host);
+			usleep_range(5000, 5500);
+			mmc_power_up(host, host->card->ocr);
+			mmc_select_voltage(host, host->card->ocr);
+			continue;
+		}
+		break;
+	}
+
 	mmc_card_clr_suspended(host->card);
 
-out:
 	mmc_release_host(host);
+
+	err = mmc_resume_clk_scaling(host);
+	if (err)
+		pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+			mmc_hostname(host), __func__, err);
+
+out:
 	return err;
 }
 
@@ -2075,20 +2723,18 @@
  */
 static int mmc_shutdown(struct mmc_host *host)
 {
-	int err = 0;
+	struct mmc_card *card = host->card;
 
 	/*
-	 * In a specific case for poweroff notify, we need to resume the card
-	 * before we can shutdown it properly.
+	 * Exit clock scaling so that it doesn't kick in after
+	 * power off notification is sent
 	 */
-	if (mmc_can_poweroff_notify(host->card) &&
-		!(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
-		err = _mmc_resume(host);
-
-	if (!err)
-		err = _mmc_suspend(host, false);
-
-	return err;
+	if (host->caps2 & MMC_CAP2_CLK_SCALE)
+		mmc_exit_clk_scaling(card->host);
+	/* send power off notification */
+	if (mmc_card_mmc(card))
+		mmc_send_pon(card);
+	return 0;
 }
 
 /*
@@ -2096,8 +2742,60 @@
  */
 static int mmc_resume(struct mmc_host *host)
 {
+	int err = 0;
+
+	MMC_TRACE(host, "%s: Enter\n", __func__);
+	err = _mmc_resume(host);
+	pm_runtime_set_active(&host->card->dev);
+	pm_runtime_mark_last_busy(&host->card->dev);
 	pm_runtime_enable(&host->card->dev);
-	return 0;
+	MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
+
+	return err;
+}
+
+#define MAX_DEFER_SUSPEND_COUNTER 20
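+/*
+ * Check whether background operations are needed or still running.
+ * Returns true while BKOPS is outstanding, so that runtime suspend
+ * can be deferred (up to MAX_DEFER_SUSPEND_COUNTER attempts).
+ */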
+static bool mmc_process_bkops(struct mmc_host *host)
+{
+	int err = 0;
+	bool is_running = false;
+	u32 status;
+
+	mmc_claim_host(host);
+
+	if (mmc_card_doing_bkops(host->card)) {
+		/* check that manual bkops finished */
+		err = mmc_send_status(host->card, &status);
+		if (err) {
+			pr_err("%s: failed to get card status\n", __func__);
+			goto unhalt;
+		}
+		if (R1_CURRENT_STATE(status) != R1_STATE_PRG) {
+			mmc_card_clr_doing_bkops(host->card);
+			goto unhalt;
+		}
+	} else {
+		mmc_check_bkops(host->card);
+	}
+
+	if (host->card->bkops.needs_bkops &&
+			!mmc_card_support_auto_bkops(host->card))
+		mmc_start_manual_bkops(host->card);
+
+unhalt:
+	mmc_release_host(host);
+
+	if (host->card->bkops.needs_bkops ||
+			mmc_card_doing_bkops(host->card)) {
+		if (host->card->bkops.retry_counter++ <
+				MAX_DEFER_SUSPEND_COUNTER) {
+			host->card->bkops.needs_check = true;
+			is_running = true;
+		} else {
+			host->card->bkops.retry_counter = 0;
+		}
+	}
+	return is_running;
 }
 
 /*
@@ -2106,15 +2804,25 @@
 static int mmc_runtime_suspend(struct mmc_host *host)
 {
 	int err;
+	ktime_t start = ktime_get();
 
 	if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
 		return 0;
 
+	if (mmc_process_bkops(host)) {
+		pm_runtime_mark_last_busy(&host->card->dev);
+		pr_debug("%s: deferred, need bkops\n", __func__);
+		return -EBUSY;
+	}
+
+	MMC_TRACE(host, "%s\n", __func__);
 	err = _mmc_suspend(host, true);
 	if (err)
 		pr_err("%s: error %d doing aggressive suspend\n",
 			mmc_hostname(host), err);
 
+	trace_mmc_runtime_suspend(mmc_hostname(host), err,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
 	return err;
 }
 
@@ -2124,13 +2832,18 @@
 static int mmc_runtime_resume(struct mmc_host *host)
 {
 	int err;
+	ktime_t start = ktime_get();
 
+	MMC_TRACE(host, "%s\n", __func__);
 	err = _mmc_resume(host);
 	if (err && err != -ENOMEDIUM)
 		pr_err("%s: error %d doing runtime resume\n",
 			mmc_hostname(host), err);
 
-	return 0;
+	trace_mmc_runtime_resume(mmc_hostname(host), err,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
+
+	return err;
 }
 
 static int mmc_can_reset(struct mmc_card *card)
@@ -2146,6 +2859,7 @@
 static int _mmc_hw_reset(struct mmc_host *host)
 {
 	struct mmc_card *card = host->card;
+	int ret;
 
 	/*
 	 * In the case of recovery, we can't expect flushing the cache to work
@@ -2155,17 +2869,25 @@
 
 	if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
 	     mmc_can_reset(card)) {
+		mmc_host_clk_hold(host);
 		/* If the card accept RST_n signal, send it. */
 		mmc_set_clock(host, host->f_init);
 		host->ops->hw_reset(host);
 		/* Set initial state and call mmc_set_ios */
 		mmc_set_initial_state(host);
+		mmc_host_clk_release(host);
 	} else {
 		/* Do a brute force power cycle */
 		mmc_power_cycle(host, card->ocr);
 		mmc_pwrseq_reset(host);
 	}
-	return mmc_init_card(host, card->ocr, card);
+
+	ret = mmc_init_card(host, host->card->ocr, host->card);
+	if (ret)
+		pr_err("%s: %s: mmc_init_card failed (%d)\n",
+			mmc_hostname(host), __func__, ret);
+
+	return ret;
 }
 
 static const struct mmc_bus_ops mmc_ops = {
@@ -2176,6 +2898,7 @@
 	.runtime_suspend = mmc_runtime_suspend,
 	.runtime_resume = mmc_runtime_resume,
 	.alive = mmc_alive,
+	.change_bus_speed = mmc_change_bus_speed,
 	.shutdown = mmc_shutdown,
 	.hw_reset = _mmc_hw_reset,
 };
@@ -2234,6 +2957,14 @@
 		goto remove_card;
 
 	mmc_claim_host(host);
+	err = mmc_init_clk_scaling(host);
+	if (err) {
+		mmc_release_host(host);
+		goto remove_card;
+	}
+
+	register_reboot_notifier(&host->card->reboot_notify);
+
 	return 0;
 
 remove_card:
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 873b2aa..140751c 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -55,6 +55,14 @@
 	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
 };
 
+static void mmc_update_bkops_hpi(struct mmc_bkops_stats *stats)
+{
+	spin_lock_irq(&stats->lock);
+	if (stats->enabled)
+		stats->hpi++;
+	spin_unlock_irq(&stats->lock);
+}
+
 int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
 {
 	int err;
@@ -455,6 +463,7 @@
 	u32 status = 0;
 	bool expired = false;
 	bool busy = false;
+	int retries = 5;
 
 	/* We have an unspecified cmd timeout, use the fallback value. */
 	if (!timeout_ms)
@@ -496,9 +505,16 @@
 
 		/* Timeout if the device still remains busy. */
 		if (expired && busy) {
-			pr_err("%s: Card stuck being busy! %s\n",
-				mmc_hostname(host), __func__);
-			return -ETIMEDOUT;
+			pr_err("%s: Card stuck being busy! %s, timeout:%ums, retries:%d\n",
+				mmc_hostname(host), __func__,
+				timeout_ms, retries);
+			if (retries)
+				timeout = jiffies +
+					msecs_to_jiffies(timeout_ms);
+			else
+				return -ETIMEDOUT;
+			retries--;
 		}
 	} while (busy);
 
@@ -506,6 +522,36 @@
 }
 
 /**
+ *	mmc_prepare_switch - helper; prepare a command to modify EXT_CSD
+ *	@cmd: command to be populated for the SWITCH operation
+ *	@index: EXT_CSD register index
+ *	@value: value to program into the EXT_CSD register
+ *	@set: cmd set values
+ *	@tout_ms: timeout (ms) for the operation performed by the register
+ *		  write; a timeout of zero implies the maximum possible
+ *		  timeout
+ *	@use_busy_signal: use the busy signal as response type
+ *
+ *	Helper to populate a SWITCH command for modifying an EXT_CSD
+ *	register of the selected card.
+ */
+
+static inline void mmc_prepare_switch(struct mmc_command *cmd, u8 index,
+				      u8 value, u8 set, unsigned int tout_ms,
+				      bool use_busy_signal)
+{
+	cmd->opcode = MMC_SWITCH;
+	cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+		  (index << 16) |
+		  (value << 8) |
+		  set;
+	cmd->flags = MMC_CMD_AC;
+	cmd->busy_timeout = tout_ms;
+	if (use_busy_signal)
+		cmd->flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
+	else
+		cmd->flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
+}
+
+/**
  *	__mmc_switch - modify EXT_CSD register
  *	@card: the MMC card associated with the data transfer
  *	@set: cmd set values
@@ -542,25 +588,13 @@
 		(timeout_ms > host->max_busy_timeout))
 		use_r1b_resp = false;
 
-	cmd.opcode = MMC_SWITCH;
-	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
-		  (index << 16) |
-		  (value << 8) |
-		  set;
-	cmd.flags = MMC_CMD_AC;
-	if (use_r1b_resp) {
-		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
-		/*
-		 * A busy_timeout of zero means the host can decide to use
-		 * whatever value it finds suitable.
-		 */
-		cmd.busy_timeout = timeout_ms;
-	} else {
-		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
-	}
+	mmc_prepare_switch(&cmd, index, value, set, timeout_ms,
+			   use_r1b_resp);
 
 	if (index == EXT_CSD_SANITIZE_START)
 		cmd.sanitize_busy = true;
+	else if (index == EXT_CSD_BKOPS_START)
+		cmd.bkops_busy = true;
 
 	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 	if (err)
@@ -754,7 +788,10 @@
 
 	data.sg = &sg;
 	data.sg_len = 1;
+	data.timeout_ns = 1000000;
+	data.timeout_clks = 0;
 	mmc_set_data_timeout(&data, card);
+
 	sg_init_one(&sg, data_buf, len);
 	mmc_wait_for_req(host, &mrq);
 	err = 0;
@@ -802,7 +839,7 @@
 	unsigned int opcode;
 	int err;
 
-	if (!card->ext_csd.hpi) {
+	if (!card->ext_csd.hpi_en) {
 		pr_warn("%s: Card didn't support HPI command\n",
 			mmc_hostname(card->host));
 		return -EINVAL;
@@ -819,7 +856,7 @@
 
 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 	if (err) {
-		pr_warn("%s: error %d interrupting operation. "
+		pr_debug("%s: error %d interrupting operation. "
 			"HPI command response %#x\n", mmc_hostname(card->host),
 			err, cmd.resp[0]);
 		return err;
@@ -884,8 +921,13 @@
 
 		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
 			break;
-		if (time_after(jiffies, prg_wait))
-			err = -ETIMEDOUT;
+		if (time_after(jiffies, prg_wait)) {
+			err = mmc_send_status(card, &status);
+			if (!err && R1_CURRENT_STATE(status) != R1_STATE_TRAN)
+				err = -ETIMEDOUT;
+			else
+				break;
+		}
 	} while (!err);
 
 out:
@@ -910,6 +952,11 @@
 {
 	int err = 0;
 
+	if (unlikely(!mmc_card_configured_manual_bkops(card)))
+		goto out;
+	if (!mmc_card_doing_bkops(card))
+		goto out;
+
 	err = mmc_interrupt_hpi(card);
 
 	/*
@@ -918,14 +965,16 @@
 	 */
 	if (!err || (err == -EINVAL)) {
 		mmc_card_clr_doing_bkops(card);
+		mmc_update_bkops_hpi(&card->bkops.stats);
 		mmc_retune_release(card->host);
 		err = 0;
 	}
-
+out:
 	return err;
 }
+EXPORT_SYMBOL(mmc_stop_bkops);
 
-static int mmc_read_bkops_status(struct mmc_card *card)
+int mmc_read_bkops_status(struct mmc_card *card)
 {
 	int err;
 	u8 *ext_csd;
@@ -934,11 +983,17 @@
 	if (err)
 		return err;
 
-	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
-	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
+	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS] &
+		MMC_BKOPS_URGENCY_MASK;
+	card->ext_csd.raw_exception_status =
+		ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+					(EXT_CSD_URGENT_BKOPS |
+					 EXT_CSD_DYNCAP_NEEDED |
+					 EXT_CSD_SYSPOOL_EXHAUSTED);
 	kfree(ext_csd);
 	return 0;
 }
+EXPORT_SYMBOL(mmc_read_bkops_status);
 
 /**
  *	mmc_start_bkops - start BKOPS for supported cards
@@ -1014,12 +1069,23 @@
 
 	if (mmc_card_mmc(card) &&
 			(card->ext_csd.cache_size > 0) &&
-			(card->ext_csd.cache_ctrl & 1)) {
+			(card->ext_csd.cache_ctrl & 1) &&
+			(!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				EXT_CSD_FLUSH_CACHE, 1, 0);
-		if (err)
+		if (err == -ETIMEDOUT) {
+			pr_err("%s: cache flush timeout\n",
+					mmc_hostname(card->host));
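+			/* Try to interrupt the stuck flush with HPI */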
+			err = mmc_interrupt_hpi(card);
+			if (err) {
+				pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
+						mmc_hostname(card->host), err);
+				err = -ENODEV;
+			}
+		} else if (err) {
 			pr_err("%s: cache flush error %d\n",
 					mmc_hostname(card->host), err);
+		}
 	}
 
 	return err;
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index a1390d4..7e317e9 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -45,6 +45,7 @@
 int mmc_flush_cache(struct mmc_card *card);
 int mmc_cmdq_enable(struct mmc_card *card);
 int mmc_cmdq_disable(struct mmc_card *card);
+int mmc_read_bkops_status(struct mmc_card *card);
 
 #endif
 
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index ef18dae..4fd9ebf 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -3114,7 +3114,8 @@
 	}
 
 #ifdef CONFIG_HIGHMEM
-	__free_pages(test->highmem, BUFFER_ORDER);
+	if (test->highmem)
+		__free_pages(test->highmem, BUFFER_ORDER);
 #endif
 	kfree(test->buffer);
 	kfree(test);
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 6edffee..da130a5 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -206,8 +206,12 @@
 			      gfp_t gfp)
 {
 	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
-	struct mmc_card *card = mq->card;
-	struct mmc_host *host = card->host;
+	struct mmc_host *host;
+
+	if (!mq)
+		return -ENODEV;
+
+	host = mq->card->host;
 
 	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
 	if (!mq_rq->sg)
@@ -493,7 +497,8 @@
 	if (blk_queue_quiesced(q))
 		blk_mq_unquiesce_queue(q);
 
-	blk_cleanup_queue(q);
+	if (likely(!blk_queue_dead(q)))
+		blk_cleanup_queue(q);
 
 	/*
 	 * A request can be completed before the next request, potentially
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index dd2f73a..987e18a 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -10,6 +10,10 @@
  *
  */
 
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mmc/card.h>
 #include <linux/mmc/sdio_ids.h>
 
 #include "card.h"
@@ -51,6 +55,16 @@
 		  MMC_QUIRK_BLK_NO_CMD23),
 	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
 		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY,
+		  add_quirk_mmc, MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD),
+
+	/*
+	 * Some SD cards lock up while using CMD23 multiblock transfers.
+	 */
+	MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd,
+		  MMC_QUIRK_BLK_NO_CMD23),
 
 	/*
 	 * Some SD cards lockup while using CMD23 multiblock transfers.
@@ -69,6 +83,20 @@
 		  MMC_QUIRK_LONG_READ_TIME),
 
 	/*
+	 * Some Samsung MMC cards need longer data read timeout than
+	 * indicated in CSD.
+	 */
+	MMC_FIXUP("Q7XSAB", CID_MANFID_SAMSUNG, 0x100, add_quirk_mmc,
+		  MMC_QUIRK_LONG_READ_TIME),
+
+	/*
+	 * Hynix eMMC cards need longer data read timeout than
+	 * indicated in CSD.
+	 */
+	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_LONG_READ_TIME),
+
+	/*
 	 * On these Samsung MoviNAND parts, performing secure erase or
 	 * secure trim can result in unrecoverable corruption due to a
 	 * firmware bug.
@@ -99,6 +127,10 @@
 	MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
 		  MMC_QUIRK_TRIM_BROKEN),
 
+	/* Some INAND MCP devices advertise incorrect timeout values */
+	MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc,
+		MMC_QUIRK_INAND_DATA_TIMEOUT),
+
 	END_FIXUP
 };
 
@@ -138,12 +170,134 @@
 	END_FIXUP
 };
 
+#ifndef SDIO_VENDOR_ID_TI
+#define SDIO_VENDOR_ID_TI		0x0097
+#endif
+
+#ifndef SDIO_DEVICE_ID_TI_WL1271
+#define SDIO_DEVICE_ID_TI_WL1271	0x4076
+#endif
+
+#ifndef SDIO_VENDOR_ID_STE
+#define SDIO_VENDOR_ID_STE		0x0020
+#endif
+
+#ifndef SDIO_DEVICE_ID_STE_CW1200
+#define SDIO_DEVICE_ID_STE_CW1200	0x2280
+#endif
+
+#ifndef SDIO_DEVICE_ID_MARVELL_8797_F0
+#define SDIO_DEVICE_ID_MARVELL_8797_F0	0x9128
+#endif
+
+#ifndef SDIO_VENDOR_ID_MSM
+#define SDIO_VENDOR_ID_MSM		0x0070
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_WCN1314
+#define SDIO_DEVICE_ID_MSM_WCN1314	0x2881
+#endif
+
+#ifndef SDIO_VENDOR_ID_MSM_QCA
+#define SDIO_VENDOR_ID_MSM_QCA		0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6003_1
+#define SDIO_DEVICE_ID_MSM_QCA_AR6003_1	0x300
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6003_2
+#define SDIO_DEVICE_ID_MSM_QCA_AR6003_2	0x301
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6004_1
+#define SDIO_DEVICE_ID_MSM_QCA_AR6004_1	0x400
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6004_2
+#define SDIO_DEVICE_ID_MSM_QCA_AR6004_2	0x401
+#endif
+
+#ifndef SDIO_VENDOR_ID_QCA6574
+#define SDIO_VENDOR_ID_QCA6574		0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_QCA6574
+#define SDIO_DEVICE_ID_QCA6574		0x50a
+#endif
+
+#ifndef SDIO_VENDOR_ID_QCA9377
+#define SDIO_VENDOR_ID_QCA9377		0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_QCA9377
+#define SDIO_DEVICE_ID_QCA9377		0x701
+#endif
+
+/*
+ * This hook just adds a quirk to all SDIO devices
+ */
+static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
+{
+	if (mmc_card_sdio(card))
+		card->quirks |= data;
+}
+
+static const struct mmc_fixup mmc_fixup_methods[] = {
+	/* by default sdio devices are considered CLK_GATING broken */
+	/*
+	 * By default, SDIO devices are considered broken for clock gating;
+	 * known-good cards are whitelisted as they are tested.
+	 */
+		   add_quirk_for_sdio_devices,
+		   MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_MSM, SDIO_DEVICE_ID_MSM_WCN1314,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6003_1,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6003_2,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6004_1,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6004_2,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+		   add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+		   add_quirk, MMC_QUIRK_DISABLE_CD),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200,
+		   add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
+		   add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_QCA6574, SDIO_DEVICE_ID_QCA6574,
+		   add_quirk, MMC_QUIRK_QCA6574_SETTINGS),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_QCA9377, SDIO_DEVICE_ID_QCA9377,
+		add_quirk, MMC_QUIRK_QCA9377_SETTINGS),
+	END_FIXUP
+};
+
 static inline void mmc_fixup_device(struct mmc_card *card,
 				    const struct mmc_fixup *table)
 {
 	const struct mmc_fixup *f;
 	u64 rev = cid_rev_card(card);
 
+	/* Fall back to the generic (non-core-specific) workarounds. */
+	if (!table)
+		table = mmc_fixup_methods;
+
 	for (f = table; f->vendor_fixup; f++) {
 		if ((f->manfid == CID_MANFID_ANY ||
 		     f->manfid == card->cid.manfid) &&
diff --git a/drivers/mmc/core/ring_buffer.c b/drivers/mmc/core/ring_buffer.c
new file mode 100644
index 0000000..35c89eb
--- /dev/null
+++ b/drivers/mmc/core/ring_buffer.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/mmc/ring_buffer.h>
+#include <linux/mmc/host.h>
+#include <linux/seq_file.h>
+
+void mmc_stop_tracing(struct mmc_host *mmc)
+{
+	mmc->trace_buf.stop_tracing = true;
+}
+
+void mmc_trace_write(struct mmc_host *mmc,
+			const char *fmt, ...)
+{
+	unsigned int idx;
+	va_list args;
+	char *event;
+	unsigned long flags;
+	char str[MMC_TRACE_EVENT_SZ];
+
+	if (unlikely(!mmc->trace_buf.data) ||
+			unlikely(mmc->trace_buf.stop_tracing))
+		return;
+
+	/*
+	 * An increment and mask are used to keep the index within
+	 * array bounds. The cast to unsigned is necessary so that the
+	 * increment and rollover wrap to 0 correctly.
+	 */
+	spin_lock_irqsave(&mmc->trace_buf.trace_lock, flags);
+	mmc->trace_buf.wr_idx += 1;
+	idx = ((unsigned int)mmc->trace_buf.wr_idx) &
+			(MMC_TRACE_RBUF_NUM_EVENTS - 1);
+	spin_unlock_irqrestore(&mmc->trace_buf.trace_lock, flags);
+
+	/* Catch an unlikely machine-specific wrap-around bug */
+	if (unlikely(idx > (MMC_TRACE_RBUF_NUM_EVENTS - 1))) {
+		pr_err("%s: %s: Invalid idx:%d for mmc trace, tracing stopped!\n",
+			mmc_hostname(mmc), __func__, idx);
+		mmc_stop_tracing(mmc);
+		return;
+	}
+
+	event = &mmc->trace_buf.data[idx * MMC_TRACE_EVENT_SZ];
+	va_start(args, fmt);
+	snprintf(str, MMC_TRACE_EVENT_SZ, "<%d> %lld: %s: %s",
+		raw_smp_processor_id(),
+		ktime_to_ns(ktime_get()),
+		mmc_hostname(mmc), fmt);
+	memset(event, '\0', MMC_TRACE_EVENT_SZ);
+	vscnprintf(event, MMC_TRACE_EVENT_SZ, str, args);
+	va_end(args);
+}
+
+void mmc_trace_init(struct mmc_host *mmc)
+{
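+	/* Index masking in mmc_trace_write() relies on a power-of-2 size */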
+	BUILD_BUG_ON_NOT_POWER_OF_2(MMC_TRACE_RBUF_NUM_EVENTS);
+
+	mmc->trace_buf.data = (char *)
+				__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+				MMC_TRACE_RBUF_SZ_ORDER);
+
+	if (!mmc->trace_buf.data) {
+		pr_err("%s: %s: Unable to allocate mmc trace buffer\n",
+			mmc_hostname(mmc), __func__);
+		return;
+	}
+
+	spin_lock_init(&mmc->trace_buf.trace_lock);
+	mmc->trace_buf.wr_idx = -1;
+}
+
+void mmc_trace_free(struct mmc_host *mmc)
+{
+	if (mmc->trace_buf.data)
+		free_pages((unsigned long)mmc->trace_buf.data,
+			MMC_TRACE_RBUF_SZ_ORDER);
+}
+
+void mmc_dump_trace_buffer(struct mmc_host *mmc, struct seq_file *s)
+{
+	unsigned int idx, cur_idx;
+	unsigned int N = MMC_TRACE_RBUF_NUM_EVENTS - 1;
+	char *event;
+	unsigned long flags;
+
+	if (!mmc->trace_buf.data)
+		return;
+
+	spin_lock_irqsave(&mmc->trace_buf.trace_lock, flags);
+	idx = ((unsigned int)mmc->trace_buf.wr_idx) & N;
+	cur_idx = (idx + 1) & N;
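+	/* idx points at the most recent event, cur_idx at the oldest */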
+
+	do {
+		event = &mmc->trace_buf.data[cur_idx * MMC_TRACE_EVENT_SZ];
+		if (s)
+			seq_printf(s, "%s", (char *)event);
+		else
+			pr_err("%s\n", (char *)event);
+		cur_idx = (cur_idx + 1) & N;
+		if (cur_idx == idx) {
+			event =
+			  &mmc->trace_buf.data[cur_idx * MMC_TRACE_EVENT_SZ];
+			if (s)
+				seq_printf(s, "latest_event: %s",
+					(char *)event);
+			else
+				pr_err("latest_event: %s\n", (char *)event);
+			break;
+		}
+	} while (1);
+	spin_unlock_irqrestore(&mmc->trace_buf.trace_lock, flags);
+}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d0d9f90..6ef491b 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -29,6 +29,12 @@
 #include "sd.h"
 #include "sd_ops.h"
 
+#define UHS_SDR104_MIN_DTR	(100 * 1000 * 1000)
+#define UHS_DDR50_MIN_DTR	(50 * 1000 * 1000)
+#define UHS_SDR50_MIN_DTR	(50 * 1000 * 1000)
+#define UHS_SDR25_MIN_DTR	(25 * 1000 * 1000)
+#define UHS_SDR12_MIN_DTR	(12500 * 1000)
+
 static const unsigned int tran_exp[] = {
 	10000,		100000,		1000000,	10000000,
 	0,		0,		0,		0
@@ -361,9 +367,9 @@
 		goto out;
 
 	if ((status[16] & 0xF) != 1) {
-		pr_warn("%s: Problem switching card into high-speed mode!\n",
-			mmc_hostname(card->host));
-		err = 0;
+		pr_warn("%s: Problem switching card into high-speed mode!, status:%x\n",
+			mmc_hostname(card->host), (status[16] & 0xF));
+		err = -EBUSY;
 	} else {
 		err = 1;
 	}
@@ -417,24 +423,28 @@
 	}
 
 	if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
-	    (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
-			card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
-	} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
-		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
-			card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
+	    (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104) &&
+	    (card->host->f_max > UHS_SDR104_MIN_DTR)) {
+		card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
 	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
 		    MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
-		    SD_MODE_UHS_SDR50)) {
-			card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+		    SD_MODE_UHS_SDR50) &&
+		    (card->host->f_max > UHS_SDR50_MIN_DTR)) {
+		card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+	} else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50) &&
+		    (card->host->f_max > UHS_DDR50_MIN_DTR)) {
+		card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
 	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
 		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
-		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
-			card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
+		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25) &&
+		 (card->host->f_max > UHS_SDR25_MIN_DTR)) {
+		card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
 	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
 		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
 		    MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
 		    SD_MODE_UHS_SDR12)) {
-			card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+		card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
 	}
 }
 
@@ -472,15 +482,17 @@
 	if (err)
 		return err;
 
-	if ((status[16] & 0xF) != card->sd_bus_speed)
-		pr_warn("%s: Problem setting bus speed mode!\n",
-			mmc_hostname(card->host));
-	else {
+	if ((status[16] & 0xF) != card->sd_bus_speed) {
+		pr_warn("%s: Problem setting bus speed mode(%u)! max_dtr:%u, timing:%u, status:%x\n",
+			mmc_hostname(card->host), card->sd_bus_speed,
+			card->sw_caps.uhs_max_dtr, timing, (status[16] & 0xF));
+		err = -EBUSY;
+	} else {
 		mmc_set_timing(card->host, timing);
 		mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
 	}
 
-	return 0;
+	return err;
 }
 
 /* Get host's max current setting at its current voltage */
@@ -572,6 +584,64 @@
 	return 0;
 }
 
+/**
+ * mmc_sd_change_bus_speed() - Change SD card bus frequency at runtime
+ * @host: pointer to mmc host structure
+ * @freq: pointer to desired frequency to be set
+ *
+ * Change the SD card bus frequency at runtime after the card is
+ * initialized. Callers are expected to make sure the card is in a
+ * safe state (DATA/RCV/TRANSFER) before changing the frequency at
+ * runtime.
+ *
+ * If the requested frequency is greater than the maximum supported by
+ * the card, *freq is clamped to the card's maximum; if it is less than
+ * the minimum supported by the host, *freq is clamped to the host's
+ * minimum.
+ */
+static int mmc_sd_change_bus_speed(struct mmc_host *host, unsigned long *freq)
+{
+	int err = 0;
+	struct mmc_card *card;
+
+	mmc_claim_host(host);
+	/*
+	 * Assign card pointer after claiming host to avoid race
+	 * conditions that may arise during removal of the card.
+	 */
+	card = host->card;
+
+	/* sanity checks */
+	if (!card || !freq) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	mmc_set_clock(host, (unsigned int) (*freq));
+
+	if (!mmc_host_is_spi(card->host) && mmc_card_uhs(card)
+			&& card->host->ops->execute_tuning) {
+		/*
+		 * We invoke the host driver's tuning for any frequency;
+		 * it is the host driver's responsibility to perform the
+		 * actual tuning only when required.
+		 */
+		mmc_host_clk_hold(card->host);
+		err = card->host->ops->execute_tuning(card->host,
+				MMC_SEND_TUNING_BLOCK);
+		mmc_host_clk_release(card->host);
+
+		if (err) {
+			pr_warn("%s: %s: tuning execution failed %d. Restoring to previous clock %lu\n",
+				   mmc_hostname(card->host), __func__, err,
+				   host->clk_scaling.curr_freq);
+			mmc_set_clock(host, host->clk_scaling.curr_freq);
+		}
+	}
+
+out:
+	mmc_release_host(host);
+	return err;
+}
+
 /*
  * UHS-I specific initialization procedure
  */
@@ -819,7 +889,9 @@
 	if (!host->ops->get_ro)
 		return -1;
 
+	mmc_host_clk_hold(host);
 	ro = host->ops->get_ro(host);
+	mmc_host_clk_release(host);
 
 	return ro;
 }
@@ -892,7 +964,10 @@
 {
 	unsigned max_dtr = (unsigned int)-1;
 
-	if (mmc_card_hs(card)) {
+	if (mmc_card_uhs(card)) {
+		if (max_dtr > card->sw_caps.uhs_max_dtr)
+			max_dtr = card->sw_caps.uhs_max_dtr;
+	} else if (mmc_card_hs(card)) {
 		if (max_dtr > card->sw_caps.hs_max_dtr)
 			max_dtr = card->sw_caps.hs_max_dtr;
 	} else if (max_dtr > card->csd.max_dtr) {
@@ -966,6 +1041,7 @@
 		err = mmc_send_relative_addr(host, &card->rca);
 		if (err)
 			goto free_card;
+		host->card = card;
 	}
 
 	if (!oldcard) {
@@ -1067,12 +1143,16 @@
 		goto free_card;
 	}
 done:
-	host->card = card;
+	card->clk_scaling_highest = mmc_sd_get_max_clock(card);
+	card->clk_scaling_lowest = host->f_min;
+
 	return 0;
 
 free_card:
-	if (!oldcard)
+	if (!oldcard) {
+		host->card = NULL;
 		mmc_remove_card(card);
+	}
 
 	return err;
 }
@@ -1082,8 +1162,12 @@
  */
 static void mmc_sd_remove(struct mmc_host *host)
 {
+	mmc_exit_clk_scaling(host);
 	mmc_remove_card(host->card);
+
+	mmc_claim_host(host);
 	host->card = NULL;
+	mmc_release_host(host);
 }
 
 /*
@@ -1101,6 +1185,18 @@
 {
 	int err;
 
+	/*
+	 * Try to claim the host. If the lock cannot be acquired within
+	 * 2 seconds, just return; this ensures that when this call is
+	 * invoked due to pm_suspend, it does not block suspend for an
+	 * extended duration.
+	 */
+	pm_runtime_get_sync(&host->card->dev);
+	if (!mmc_try_claim_host(host, 2000)) {
+		pm_runtime_mark_last_busy(&host->card->dev);
+		pm_runtime_put_autosuspend(&host->card->dev);
+		return;
+	}
+
 	mmc_get_card(host->card, NULL);
 
 	/*
@@ -1124,6 +1220,13 @@
 {
 	int err = 0;
 
+	err = mmc_suspend_clk_scaling(host);
+	if (err) {
+		pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+			mmc_hostname(host), __func__, err);
+		return err;
+	}
+
 	mmc_claim_host(host);
 
 	if (mmc_card_suspended(host->card))
@@ -1149,11 +1252,16 @@
 {
 	int err;
 
+	MMC_TRACE(host, "%s: Enter\n", __func__);
 	err = _mmc_sd_suspend(host);
 	if (!err) {
 		pm_runtime_disable(&host->card->dev);
 		pm_runtime_set_suspended(&host->card->dev);
-	}
+	} else if (mmc_bus_manual_resume(host)) {
+		/* if suspend failed, force mmc_detect_change during resume */
+		host->ignore_bus_resume_flags = true;
+	}
+
+	MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
 
 	return err;
 }
@@ -1173,8 +1281,23 @@
 
 	mmc_power_up(host, host->card->ocr);
 	err = mmc_sd_init_card(host, host->card->ocr, host->card);
+	if (err == -ENOENT) {
+		pr_debug("%s: %s: found a different card(%d), do detect change\n",
+			mmc_hostname(host), __func__, err);
+		mmc_card_set_removed(host->card);
+		mmc_detect_change(host, msecs_to_jiffies(200));
+	} else if (err) {
+		goto out;
+	}
 	mmc_card_clr_suspended(host->card);
 
+	err = mmc_resume_clk_scaling(host);
+	if (err) {
+		pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+			mmc_hostname(host), __func__, err);
+		goto out;
+	}
+
 out:
 	mmc_release_host(host);
 	return err;
@@ -1185,8 +1308,16 @@
  */
 static int mmc_sd_resume(struct mmc_host *host)
 {
+	int err = 0;
+
+	MMC_TRACE(host, "%s: Enter\n", __func__);
+	err = _mmc_sd_resume(host);
+	pm_runtime_set_active(&host->card->dev);
+	pm_runtime_mark_last_busy(&host->card->dev);
 	pm_runtime_enable(&host->card->dev);
-	return 0;
+	MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
+
+	return err;
 }
 
 /*
@@ -1237,6 +1368,7 @@
 	.resume = mmc_sd_resume,
 	.alive = mmc_sd_alive,
 	.shutdown = mmc_sd_suspend,
+	.change_bus_speed = mmc_sd_change_bus_speed,
 	.hw_reset = mmc_sd_hw_reset,
 };
 
@@ -1292,6 +1424,13 @@
 		goto remove_card;
 
 	mmc_claim_host(host);
+
+	err = mmc_init_clk_scaling(host);
+	if (err) {
+		mmc_release_host(host);
+		goto remove_card;
+	}
+
 	return 0;
 
 remove_card:
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index d8e17ea..716df16 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -184,6 +184,23 @@
 				card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
 			if (data & SDIO_DRIVE_SDTD)
 				card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+
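+			/*
+			 * Enable the asynchronous interrupt extension in
+			 * 4-bit mode when both the card and the host
+			 * support it.
+			 */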
+			ret = mmc_io_rw_direct(card, 0, 0,
+				SDIO_CCCR_INTERRUPT_EXTENSION, 0, &data);
+			if (ret)
+				goto out;
+			if (data & SDIO_SUPPORT_ASYNC_INTR) {
+				if (card->host->caps2 &
+				    MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE) {
+					data |= SDIO_ENABLE_ASYNC_INTR;
+					ret = mmc_io_rw_direct(card, 1, 0,
+						SDIO_CCCR_INTERRUPT_EXTENSION,
+						data, NULL);
+					if (ret)
+						goto out;
+					card->cccr.async_intr_sup = 1;
+				}
+			}
 		}
 
 		/* if no uhs mode ensure we check for high speed */
@@ -202,12 +219,60 @@
 	return ret;
 }
 
+static void sdio_enable_vendor_specific_settings(struct mmc_card *card)
+{
+	int ret;
+	u8 settings;
+
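+	/* Program vendor-specific CCCR registers (0xF0-0xF2) on QCA parts */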
+	if (mmc_enable_qca6574_settings(card) ||
+		mmc_enable_qca9377_settings(card)) {
+		ret = mmc_io_rw_direct(card, 1, 0, 0xF2, 0x0F, NULL);
+		if (ret) {
+			pr_crit("%s: failed to write to fn 0xf2 %d\n",
+					mmc_hostname(card->host), ret);
+			goto out;
+		}
+
+		ret = mmc_io_rw_direct(card, 0, 0, 0xF1, 0, &settings);
+		if (ret) {
+			pr_crit("%s: failed to read fn 0xf1 %d\n",
+					mmc_hostname(card->host), ret);
+			goto out;
+		}
+
+		settings |= 0x80;
+		ret = mmc_io_rw_direct(card, 1, 0, 0xF1, settings, NULL);
+		if (ret) {
+			pr_crit("%s: failed to write to fn 0xf1 %d\n",
+					mmc_hostname(card->host), ret);
+			goto out;
+		}
+
+		ret = mmc_io_rw_direct(card, 0, 0, 0xF0, 0, &settings);
+		if (ret) {
+			pr_crit("%s: failed to read fn 0xf0 %d\n",
+					mmc_hostname(card->host), ret);
+			goto out;
+		}
+
+		settings |= 0x20;
+		ret = mmc_io_rw_direct(card, 1, 0, 0xF0, settings, NULL);
+		if (ret) {
+			pr_crit("%s: failed to write to fn 0xf0 %d\n",
+					mmc_hostname(card->host), ret);
+			goto out;
+		}
+	}
+out:
+	return;
+}
+
 static int sdio_enable_wide(struct mmc_card *card)
 {
 	int ret;
 	u8 ctrl;
 
-	if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+	if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
 		return 0;
 
 	if (card->cccr.low_speed && !card->cccr.wide_bus)
@@ -223,7 +288,10 @@
 
-	/* set as 4-bit bus width */
+	/* set the widest supported bus width */
 	ctrl &= ~SDIO_BUS_WIDTH_MASK;
-	ctrl |= SDIO_BUS_WIDTH_4BIT;
+	if (card->host->caps & MMC_CAP_8_BIT_DATA)
+		ctrl |= SDIO_BUS_WIDTH_8BIT;
+	else if (card->host->caps & MMC_CAP_4_BIT_DATA)
+		ctrl |= SDIO_BUS_WIDTH_4BIT;
 
 	ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
 	if (ret)
@@ -264,7 +332,7 @@
 	int ret;
 	u8 ctrl;
 
-	if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+	if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
 		return 0;
 
 	if (card->cccr.low_speed && !card->cccr.wide_bus)
@@ -274,10 +342,10 @@
 	if (ret)
 		return ret;
 
-	if (!(ctrl & SDIO_BUS_WIDTH_4BIT))
+	if (!(ctrl & (SDIO_BUS_WIDTH_4BIT | SDIO_BUS_WIDTH_8BIT)))
 		return 0;
 
-	ctrl &= ~SDIO_BUS_WIDTH_4BIT;
+	ctrl &= ~(SDIO_BUS_WIDTH_4BIT | SDIO_BUS_WIDTH_8BIT);
 	ctrl |= SDIO_BUS_ASYNC_INT;
 
 	ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
@@ -495,6 +563,9 @@
 	if (err)
 		return err;
 
+	/* Vendor specific settings based on card quirks */
+	sdio_enable_vendor_specific_settings(card);
+
 	speed &= ~SDIO_SPEED_BSS_MASK;
 	speed |= bus_speed;
 	err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
@@ -615,7 +686,8 @@
 		card->type = MMC_TYPE_SD_COMBO;
 
 		if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
-		    memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) {
+		    memcmp(card->raw_cid, oldcard->raw_cid,
+				sizeof(card->raw_cid)) != 0)) {
 			mmc_remove_card(card);
 			return -ENOENT;
 		}
@@ -631,8 +703,11 @@
 	/*
 	 * Call the optional HC's init_card function to handle quirks.
 	 */
-	if (host->ops->init_card)
+	if (host->ops->init_card) {
+		mmc_host_clk_hold(host);
 		host->ops->init_card(host, card);
+		mmc_host_clk_release(host);
+	}
 
 	/*
 	 * If the host and card support UHS-I mode request the card
@@ -789,7 +864,12 @@
 		 * Switch to wider bus (if supported).
 		 */
 		err = sdio_enable_4bit_bus(card);
-		if (err)
+		if (err > 0) {
+			if (card->host->caps & MMC_CAP_8_BIT_DATA)
+				mmc_set_bus_width(card->host, MMC_BUS_WIDTH_8);
+			else if (card->host->caps & MMC_CAP_4_BIT_DATA)
+				mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+		} else if (err)
 			goto remove;
 	}
 
@@ -934,6 +1014,7 @@
  */
 static int mmc_sdio_suspend(struct mmc_host *host)
 {
+	MMC_TRACE(host, "%s: Enter\n", __func__);
 	mmc_claim_host(host);
 
 	if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
@@ -941,13 +1022,15 @@
 
 	if (!mmc_card_keep_power(host)) {
 		mmc_power_off(host);
+	} else if (host->ios.clock) {
+		mmc_gate_clock(host);
 	} else if (host->retune_period) {
 		mmc_retune_timer_stop(host);
 		mmc_retune_needed(host);
 	}
 
 	mmc_release_host(host);
-
+	MMC_TRACE(host, "%s: Exit\n", __func__);
 	return 0;
 }
 
@@ -955,6 +1038,7 @@
 {
 	int err = 0;
 
+	MMC_TRACE(host, "%s: Enter\n", __func__);
 	/* Basic card reinitialization. */
 	mmc_claim_host(host);
 
@@ -980,18 +1064,30 @@
 	} else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
 		/* We may have switched to 1-bit mode during suspend */
 		err = sdio_enable_4bit_bus(host->card);
+		if (err > 0) {
+			if (host->caps & MMC_CAP_8_BIT_DATA)
+				mmc_set_bus_width(host, MMC_BUS_WIDTH_8);
+			else if (host->caps & MMC_CAP_4_BIT_DATA)
+				mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+			err = 0;
+		}
 	}
 
 	if (!err && host->sdio_irqs) {
-		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
+		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
 			wake_up_process(host->sdio_irq_thread);
-		else if (host->caps & MMC_CAP_SDIO_IRQ)
+		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 1);
+			mmc_host_clk_release(host);
+		}
 	}
 
 	mmc_release_host(host);
 
 	host->pm_flags &= ~MMC_PM_KEEP_POWER;
+	host->pm_flags &= ~MMC_PM_WAKE_SDIO_IRQ;
+	MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
 	return err;
 }
 
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index f8c3728..d024c12 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -55,7 +55,7 @@
 
 	for (i = 0; i < nr_strings; i++) {
 		buffer[i] = string;
-		strcpy(string, buf);
+		strlcpy(string, buf, strlen(buf) + 1);
 		string += strlen(string) + 1;
 		buf += strlen(buf) + 1;
 	}
@@ -277,8 +277,16 @@
 			break;
 
 		/* null entries have no link field or data */
-		if (tpl_code == 0x00)
+		if (tpl_code == 0x00) {
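+			/* These devices end their CIS at the first null tuple */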
+			if (card->cis.vendor == 0x70 &&
+				(card->cis.device == 0x2460 ||
+				 card->cis.device == 0x0460 ||
+				 card->cis.device == 0x23F1 ||
+				 card->cis.device == 0x23F0))
+				break;
+
 			continue;
+		}
 
 		ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
 		if (ret)
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 7ca7b99..b63955b 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -97,7 +97,9 @@
 	mmc_claim_host(host);
 	if (host->sdio_irqs) {
 		host->sdio_irq_pending = true;
+		mmc_host_clk_hold(host);
 		process_sdio_pending_irqs(host);
+		mmc_host_clk_release(host);
 		if (host->ops->ack_sdio_irq)
 			host->ops->ack_sdio_irq(host);
 	}
@@ -125,6 +127,7 @@
 	struct sched_param param = { .sched_priority = 1 };
 	unsigned long period, idle_period;
 	int ret;
+	bool ws;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
 
@@ -159,6 +162,17 @@
 				       &host->sdio_irq_thread_abort);
 		if (ret)
 			break;
+		ws = false;
+		/*
+		 * Prevent suspend if one has started by the time we are
+		 * scheduled; ~100 msec should be enough for the system to
+		 * resume and attend to the card's request.
+		 */
+		if ((host->dev_status == DEV_SUSPENDING) ||
+		    (host->dev_status == DEV_SUSPENDED)) {
+			pm_wakeup_event(&host->card->dev, 100);
+			ws = true;
+		}
 		ret = process_sdio_pending_irqs(host);
 		host->sdio_irq_pending = false;
 		mmc_release_host(host);
@@ -190,15 +204,27 @@
 		}
 
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (host->caps & MMC_CAP_SDIO_IRQ)
+		if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 1);
+			mmc_host_clk_release(host);
+		}
+		/*
+		 * The function drivers will have processed the card's event
+		 * unless we are suspended, so release the wakeup source.
+		 */
+		if (ws && (host->dev_status == DEV_RESUMED))
+			pm_relax(&host->card->dev);
 		if (!kthread_should_stop())
 			schedule_timeout(period);
 		set_current_state(TASK_RUNNING);
 	} while (!kthread_should_stop());
 
-	if (host->caps & MMC_CAP_SDIO_IRQ)
+	if (host->caps & MMC_CAP_SDIO_IRQ) {
+		mmc_host_clk_hold(host);
 		host->ops->enable_sdio_irq(host, 0);
+		mmc_host_clk_release(host);
+	}
 
 	pr_debug("%s: IRQ thread exiting with code %d\n",
 		 mmc_hostname(host), ret);
@@ -224,7 +250,9 @@
 				return err;
 			}
 		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 1);
+			mmc_host_clk_release(host);
 		}
 	}
 
@@ -245,7 +273,9 @@
 			atomic_set(&host->sdio_irq_thread_abort, 1);
 			kthread_stop(host->sdio_irq_thread);
 		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 0);
+			mmc_host_clk_release(host);
 		}
 	}
 
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 86803a3..514da6a 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -17,6 +17,7 @@
 #include <linux/mmc/slot-gpio.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/extcon.h>
 
 #include "slot-gpio.h"
 
@@ -64,6 +65,15 @@
 int mmc_gpio_get_ro(struct mmc_host *host)
 {
 	struct mmc_gpio *ctx = host->slot.handler_priv;
+	int ret;
+
+	if (host->extcon) {
+		ret =  extcon_get_state(host->extcon, EXTCON_MECHANICAL);
+		if (ret < 0)
+		ret = extcon_get_state(host->extcon, EXTCON_MECHANICAL);
+					__func__, ret);
+		return ret;
+	}
 
 	if (!ctx || !ctx->ro_gpio)
 		return -ENOSYS;
@@ -183,6 +193,53 @@
 }
 EXPORT_SYMBOL(mmc_gpio_set_cd_wake);
 
+static int mmc_card_detect_notifier(struct notifier_block *nb,
+				       unsigned long event, void *ptr)
+{
+	struct mmc_host *host = container_of(nb, struct mmc_host,
+					     card_detect_nb);
+
+	host->trigger_card_event = true;
+	mmc_detect_change(host, 0);
+
+	return NOTIFY_DONE;
+}
+
+void mmc_register_extcon(struct mmc_host *host)
+{
+	struct extcon_dev *extcon = host->extcon;
+	int err;
+
+	if (!extcon)
+		return;
+
+	host->card_detect_nb.notifier_call = mmc_card_detect_notifier;
+	err = extcon_register_notifier(extcon, EXTCON_MECHANICAL,
+				       &host->card_detect_nb);
+	if (err) {
+		dev_err(mmc_dev(host), "%s: extcon_register_notifier() failed ret=%d\n",
+			__func__, err);
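+		/* Fall back to polling for card detection */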
+		host->caps |= MMC_CAP_NEEDS_POLL;
+	}
+}
+EXPORT_SYMBOL(mmc_register_extcon);
+
+void mmc_unregister_extcon(struct mmc_host *host)
+{
+	struct extcon_dev *extcon = host->extcon;
+	int err;
+
+	if (!extcon)
+		return;
+
+	err = extcon_unregister_notifier(extcon, EXTCON_MECHANICAL,
+					 &host->card_detect_nb);
+	if (err)
+		dev_err(mmc_dev(host), "%s: extcon_unregister_notifier() failed ret=%d\n",
+			__func__, err);
+}
+EXPORT_SYMBOL(mmc_unregister_extcon);
+
 /* Register an alternate interrupt service routine for
  * the card-detect GPIO.
  */
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 694d082..59ce9d7 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -450,14 +450,14 @@
 	  If unsure, say N.
 
 config MMC_SDHCI_MSM
-	tristate "Qualcomm SDHCI Controller Support"
-	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+	tristate "Qualcomm Technologies, Inc. SDHCI Controller Support"
+	depends on ARCH_QCOM || ARCH_MSM || (ARM && COMPILE_TEST)
 	depends on MMC_SDHCI_PLTFM
 	select MMC_SDHCI_IO_ACCESSORS
 	help
 	  This selects the Secure Digital Host Controller Interface (SDHCI)
-	  support present in Qualcomm SOCs. The controller supports
-	  SD/MMC/SDIO devices.
+	  support present in Qualcomm Technologies, Inc. SOCs. The controller
+	  supports SD/MMC/SDIO devices.
 
 	  If you have a controller with this interface, say Y or M here.
 
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index ce8398e..c30312d 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -85,6 +85,8 @@
 obj-$(CONFIG_MMC_SDHCI_OF_HLWD)		+= sdhci-of-hlwd.o
 obj-$(CONFIG_MMC_SDHCI_OF_DWCMSHC)	+= sdhci-of-dwcmshc.o
 obj-$(CONFIG_MMC_SDHCI_BCM_KONA)	+= sdhci-bcm-kona.o
+obj-$(CONFIG_MMC_SDHCI_MSM_ICE)		+= sdhci-msm-ice.o
 obj-$(CONFIG_MMC_SDHCI_IPROC)		+= sdhci-iproc.o
 obj-$(CONFIG_MMC_SDHCI_MSM)		+= sdhci-msm.o
 obj-$(CONFIG_MMC_SDHCI_ST)		+= sdhci-st.o
diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c
new file mode 100644
index 0000000..897d240
--- /dev/null
+++ b/drivers/mmc/host/sdhci-msm-ice.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015, 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "sdhci-msm-ice.h"
+
+static void sdhci_msm_ice_error_cb(void *host_ctrl, u32 error)
+{
+	struct sdhci_msm_host *msm_host = (struct sdhci_msm_host *)host_ctrl;
+
+	dev_err(&msm_host->pdev->dev, "%s: Error in ice operation 0x%x\n",
+		__func__, error);
+
+	if (msm_host->ice.state == SDHCI_MSM_ICE_STATE_ACTIVE)
+		msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED;
+}
+
+static struct platform_device *sdhci_msm_ice_get_pdevice(struct device *dev)
+{
+	struct device_node *node;
+	struct platform_device *ice_pdev = NULL;
+
+	node = of_parse_phandle(dev->of_node, SDHC_MSM_CRYPTO_LABEL, 0);
+	if (!node) {
+		dev_dbg(dev, "%s: sdhc-msm-crypto property not specified\n",
+			__func__);
+		goto out;
+	}
+	ice_pdev = qcom_ice_get_pdevice(node);
+out:
+	return ice_pdev;
+}
+
+static
+struct qcom_ice_variant_ops *sdhci_msm_ice_get_vops(struct device *dev)
+{
+	struct qcom_ice_variant_ops *ice_vops = NULL;
+	struct device_node *node;
+
+	node = of_parse_phandle(dev->of_node, SDHC_MSM_CRYPTO_LABEL, 0);
+	if (!node) {
+		dev_dbg(dev, "%s: sdhc-msm-crypto property not specified\n",
+			__func__);
+		goto out;
+	}
+	ice_vops = qcom_ice_get_variant_ops(node);
+	of_node_put(node);
+out:
+	return ice_vops;
+}
+
+static
+void sdhci_msm_enable_ice_hci(struct sdhci_host *host, bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	u32 config = 0;
+	u32 ice_cap = 0;
+
+	/*
+	 * Enable the cryptographic support inside SDHC.
+	 * This is a global config which needs to be enabled
+	 * all the time.
+	 * Only when it is enabled will the ICE_HCI capability
+	 * be reflected in the CQCAP register.
+	 */
+	config = readl_relaxed(host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4);
+
+	if (enable)
+		config &= ~DISABLE_CRYPTO;
+	else
+		config |= DISABLE_CRYPTO;
+	writel_relaxed(config, host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4);
+
+	/*
+	 * The CQCAP register is in a different register space from the
+	 * ICE global enable register above, so an mb() is required to
+	 * ensure the above write completes before CQCAP is read.
+	 */
+	mb();
+
+	/*
+	 * Check if ICE HCI capability support is present
+	 * If present, enable it.
+	 */
+	ice_cap = readl_relaxed(msm_host->cryptoio + ICE_CQ_CAPABILITIES);
+	if (ice_cap & ICE_HCI_SUPPORT) {
+		config = readl_relaxed(msm_host->cryptoio + ICE_CQ_CONFIG);
+
+		if (enable)
+			config |= CRYPTO_GENERAL_ENABLE;
+		else
+			config &= ~CRYPTO_GENERAL_ENABLE;
+		writel_relaxed(config, msm_host->cryptoio + ICE_CQ_CONFIG);
+	}
+}
+
+int sdhci_msm_ice_get_dev(struct sdhci_host *host)
+{
+	struct device *sdhc_dev;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	if (!msm_host || !msm_host->pdev) {
+		pr_err("%s: invalid msm_host %p or msm_host->pdev\n",
+			__func__, msm_host);
+		return -EINVAL;
+	}
+
+	sdhc_dev = &msm_host->pdev->dev;
+	msm_host->ice.vops  = sdhci_msm_ice_get_vops(sdhc_dev);
+	msm_host->ice.pdev = sdhci_msm_ice_get_pdevice(sdhc_dev);
+
+	if (msm_host->ice.pdev == ERR_PTR(-EPROBE_DEFER)) {
+		dev_err(sdhc_dev, "%s: ICE device not probed yet\n",
+			__func__);
+		msm_host->ice.pdev = NULL;
+		msm_host->ice.vops = NULL;
+		return -EPROBE_DEFER;
+	}
+
+	if (!msm_host->ice.pdev) {
+		dev_dbg(sdhc_dev, "%s: invalid platform device\n", __func__);
+		msm_host->ice.vops = NULL;
+		return -ENODEV;
+	}
+	if (!msm_host->ice.vops) {
+		dev_dbg(sdhc_dev, "%s: invalid ice vops\n", __func__);
+		msm_host->ice.pdev = NULL;
+		return -ENODEV;
+	}
+	msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED;
+	return 0;
+}
+
+static
+int sdhci_msm_ice_pltfm_init(struct sdhci_msm_host *msm_host)
+{
+	struct resource *ice_memres = NULL;
+	struct platform_device *pdev = msm_host->pdev;
+	int err = 0;
+
+	if (!msm_host->ice_hci_support)
+		goto out;
+	/*
+	 * ICE HCI registers are present in cmdq register space.
+	 * So map the cmdq mem for accessing ICE HCI registers.
+	 */
+	ice_memres = platform_get_resource_byname(pdev,
+						IORESOURCE_MEM, "cmdq_mem");
+	if (!ice_memres) {
+		dev_err(&pdev->dev, "Failed to get iomem resource for ice\n");
+		err = -EINVAL;
+		goto out;
+	}
+	msm_host->cryptoio = devm_ioremap(&pdev->dev,
+					ice_memres->start,
+					resource_size(ice_memres));
+	if (!msm_host->cryptoio) {
+		dev_err(&pdev->dev, "Failed to remap registers\n");
+		err = -ENOMEM;
+	}
+out:
+	return err;
+}
+
+int sdhci_msm_ice_init(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int err = 0;
+
+	if (msm_host->ice.vops->init) {
+		err = sdhci_msm_ice_pltfm_init(msm_host);
+		if (err)
+			goto out;
+
+		if (msm_host->ice_hci_support)
+			sdhci_msm_enable_ice_hci(host, true);
+
+		err = msm_host->ice.vops->init(msm_host->ice.pdev,
+					msm_host,
+					sdhci_msm_ice_error_cb);
+		if (err) {
+			pr_err("%s: ice init err %d\n",
+				mmc_hostname(host->mmc), err);
+			sdhci_msm_ice_print_regs(host);
+			if (msm_host->ice_hci_support)
+				sdhci_msm_enable_ice_hci(host, false);
+			goto out;
+		}
+		msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
+	}
+
+out:
+	return err;
+}
+
+void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot)
+{
+	writel_relaxed(SDHCI_MSM_ICE_ENABLE_BYPASS,
+		host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
+}
+
+static
+int sdhci_msm_ice_get_cfg(struct sdhci_msm_host *msm_host, struct request *req,
+			unsigned int *bypass, short *key_index)
+{
+	int err = 0;
+	struct ice_data_setting ice_set;
+
+	memset(&ice_set, 0, sizeof(struct ice_data_setting));
+	if (msm_host->ice.vops->config_start) {
+		err = msm_host->ice.vops->config_start(
+						msm_host->ice.pdev,
+						req, &ice_set, false);
+		if (err) {
+			pr_err("%s: ice config failed %d\n",
+					mmc_hostname(msm_host->mmc), err);
+			return err;
+		}
+	}
+	/* if writing data command */
+	if (rq_data_dir(req) == WRITE)
+		*bypass = ice_set.encr_bypass ?
+				SDHCI_MSM_ICE_ENABLE_BYPASS :
+				SDHCI_MSM_ICE_DISABLE_BYPASS;
+	/* if reading data command */
+	else if (rq_data_dir(req) == READ)
+		*bypass = ice_set.decr_bypass ?
+				SDHCI_MSM_ICE_ENABLE_BYPASS :
+				SDHCI_MSM_ICE_DISABLE_BYPASS;
+	*key_index = ice_set.crypto_data.key_index;
+	return err;
+}
+
+static
+void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba, u32 slot,
+		unsigned int bypass, short key_index, u32 cdu_sz)
+{
+	unsigned int ctrl_info_val = 0;
+
+	/* Configure ICE index */
+	ctrl_info_val =
+		(key_index &
+		 MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX)
+		 << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX;
+
+	/* Configure data unit size of transfer request */
+	ctrl_info_val |=
+		(cdu_sz &
+		 MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU)
+		 << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU;
+
+	/* Configure ICE bypass mode */
+	ctrl_info_val |=
+		(bypass & MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS)
+		 << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS;
+
+	writel_relaxed((lba & 0xFFFFFFFF),
+		host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n + 16 * slot);
+	writel_relaxed(((lba >> 32) & 0xFFFFFFFF),
+		host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n + 16 * slot);
+	writel_relaxed(ctrl_info_val,
+		host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
+	/* Ensure ICE registers are configured before issuing SDHCI request */
+	mb();
+}
+
+static inline
+void sdhci_msm_ice_hci_update_cmdq_cfg(u64 dun, unsigned int bypass,
+				short key_index, u64 *ice_ctx)
+{
+	/*
+	 * The naming convention of the register fields changed between
+	 * ICE2.0 and ICE3.0. Below are the equivalent names,
+	 * ICE3.0 vs ICE2.0:
+	 *   Data Unit Number(DUN) == Logical Base address(LBA)
+	 *   Crypto Configuration index (CCI) == Key Index
+	 *   Crypto Enable (CE) == !BYPASS
+	 */
+	if (ice_ctx)
+		*ice_ctx = DATA_UNIT_NUM(dun) |
+			CRYPTO_CONFIG_INDEX(key_index) |
+			CRYPTO_ENABLE(!bypass);
+}
+
+static
+void sdhci_msm_ice_hci_update_noncq_cfg(struct sdhci_host *host,
+		u64 dun, unsigned int bypass, short key_index)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	unsigned int crypto_params = 0;
+	/*
+	 * The naming convention of the register fields changed between
+	 * ICE2.0 and ICE3.0. Below are the equivalent names,
+	 * ICE3.0 vs ICE2.0:
+	 *   Data Unit Number(DUN) == Logical Base address(LBA)
+	 *   Crypto Configuration index (CCI) == Key Index
+	 *   Crypto Enable (CE) == !BYPASS
+	 */
+	/* Configure ICE bypass mode */
+	crypto_params |=
+		((!bypass) & MASK_SDHCI_MSM_ICE_HCI_PARAM_CE)
+			<< OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE;
+	/* Configure Crypto Configure Index (CCI) */
+	crypto_params |= (key_index &
+			 MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI)
+			 << OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI;
+
+	writel_relaxed((crypto_params & 0xFFFFFFFF),
+		msm_host->cryptoio + ICE_NONCQ_CRYPTO_PARAMS);
+
+	/* Update DUN */
+	writel_relaxed((dun & 0xFFFFFFFF),
+		msm_host->cryptoio + ICE_NONCQ_CRYPTO_DUN);
+	/* Ensure ICE registers are configured before issuing SDHCI request */
+	mb();
+}
+
+int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+			u32 slot)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int err = 0;
+	short key_index = 0;
+	u64 dun = 0;
+	unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
+	u32 cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_512_B;
+	struct request *req;
+
+	if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+		pr_err("%s: ice is in invalid state %d\n",
+			mmc_hostname(host->mmc), msm_host->ice.state);
+		return -EINVAL;
+	}
+
+	WARN_ON(!mrq);
+	if (!mrq)
+		return -EINVAL;
+	req = mrq->req;
+	if (req && req->bio) {
+#ifdef CONFIG_PFK
+		if (bio_dun(req->bio)) {
+			dun = bio_dun(req->bio);
+			cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB;
+		} else {
+			dun = req->__sector;
+		}
+#else
+		dun = req->__sector;
+#endif
+		err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
+		if (err)
+			return err;
+		pr_debug("%s: %s: slot %d bypass %d key_index %d\n",
+				mmc_hostname(host->mmc),
+				(rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
+				slot, bypass, key_index);
+	}
+
+	if (msm_host->ice_hci_support) {
+		/* For ICE HCI / ICE3.0 */
+		sdhci_msm_ice_hci_update_noncq_cfg(host, dun, bypass,
+						key_index);
+	} else {
+		/* For ICE versions earlier than ICE3.0 */
+		sdhci_msm_ice_update_cfg(host, dun, slot, bypass, key_index,
+					cdu_sz);
+	}
+	return 0;
+}
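
The DUN/CDU selection above follows one rule: a bio that carries a per-file-key DUN (CONFIG_PFK) uses 4 KB crypto data units, otherwise the DUN is simply the request's 512-byte start sector. A standalone model of that decision, with plain parameters standing in for bio_dun() and req->__sector (illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { CDU_512_B = 0, CDU_4_KB = 3 };

/* has_pfk_dun stands in for bio_dun(req->bio) being non-zero */
static void pick_dun(bool has_pfk_dun, uint64_t pfk_dun, uint64_t sector,
		     uint64_t *dun, int *cdu_sz)
{
	if (has_pfk_dun) {
		*dun = pfk_dun;	/* file-based key: 4 KB crypto data units */
		*cdu_sz = CDU_4_KB;
	} else {
		*dun = sector;	/* otherwise DUN is the 512 B start sector */
		*cdu_sz = CDU_512_B;
	}
}

int main(void)
{
	uint64_t dun;
	int cdu;

	pick_dun(true, 0x40, 0x200, &dun, &cdu);
	printf("dun=0x%llx cdu=%d\n", (unsigned long long)dun, cdu);
	return 0;
}
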
+
+int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
+			struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int err = 0;
+	short key_index = 0;
+	u64 dun = 0;
+	unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
+	struct request *req;
+	u32 cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_512_B;
+
+	if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+		pr_err("%s: ice is in invalid state %d\n",
+			mmc_hostname(host->mmc), msm_host->ice.state);
+		return -EINVAL;
+	}
+
+	WARN_ON(!mrq);
+	if (!mrq)
+		return -EINVAL;
+	req = mrq->req;
+	if (req && req->bio) {
+#ifdef CONFIG_PFK
+		if (bio_dun(req->bio)) {
+			dun = bio_dun(req->bio);
+			cdu_sz = SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB;
+		} else {
+			dun = req->__sector;
+		}
+#else
+		dun = req->__sector;
+#endif
+		err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
+		if (err)
+			return err;
+		pr_debug("%s: %s: slot %d bypass %d key_index %d\n",
+				mmc_hostname(host->mmc),
+				(rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
+				slot, bypass, key_index);
+	}
+
+	if (msm_host->ice_hci_support) {
+		/* For ICE HCI / ICE3.0 */
+		sdhci_msm_ice_hci_update_cmdq_cfg(dun, bypass, key_index,
+						ice_ctx);
+	} else {
+		/* For ICE versions earlier than ICE3.0 */
+		sdhci_msm_ice_update_cfg(host, dun, slot, bypass, key_index,
+					cdu_sz);
+	}
+	return 0;
+}
+
+int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int err = 0;
+	struct request *req;
+
+	if (!host->is_crypto_en)
+		return 0;
+
+	if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+		pr_err("%s: ice is in invalid state %d\n",
+			mmc_hostname(host->mmc), msm_host->ice.state);
+		return -EINVAL;
+	}
+
+	req = mrq->req;
+	if (req) {
+		if (msm_host->ice.vops->config_end) {
+			err = msm_host->ice.vops->config_end(req);
+			if (err) {
+				pr_err("%s: ice config end failed %d\n",
+						mmc_hostname(host->mmc), err);
+				return err;
+			}
+		}
+	}
+
+	return 0;
+}
+
+int sdhci_msm_ice_reset(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int err = 0;
+
+	if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+		pr_err("%s: ice is in invalid state before reset %d\n",
+			mmc_hostname(host->mmc), msm_host->ice.state);
+		return -EINVAL;
+	}
+
+	if (msm_host->ice.vops->reset) {
+		err = msm_host->ice.vops->reset(msm_host->ice.pdev);
+		if (err) {
+			pr_err("%s: ice reset failed %d\n",
+					mmc_hostname(host->mmc), err);
+			sdhci_msm_ice_print_regs(host);
+			return err;
+		}
+	}
+
+	/* If ICE HCI support is present then re-enable it */
+	if (msm_host->ice_hci_support)
+		sdhci_msm_enable_ice_hci(host, true);
+
+	if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+		pr_err("%s: ice is in invalid state after reset %d\n",
+			mmc_hostname(host->mmc), msm_host->ice.state);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int sdhci_msm_ice_resume(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int err = 0;
+
+	if (msm_host->ice.state !=
+			SDHCI_MSM_ICE_STATE_SUSPENDED) {
+		pr_err("%s: ice is in invalid state before resume %d\n",
+			mmc_hostname(host->mmc), msm_host->ice.state);
+		return -EINVAL;
+	}
+
+	if (msm_host->ice.vops->resume) {
+		err = msm_host->ice.vops->resume(msm_host->ice.pdev);
+		if (err) {
+			pr_err("%s: ice resume failed %d\n",
+					mmc_hostname(host->mmc), err);
+			return err;
+		}
+	}
+
+	msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
+	return 0;
+}
+
+int sdhci_msm_ice_suspend(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int err = 0;
+
+	if (msm_host->ice.state !=
+			SDHCI_MSM_ICE_STATE_ACTIVE) {
+		pr_err("%s: ice is in invalid state before resume %d\n",
+			mmc_hostname(host->mmc), msm_host->ice.state);
+		return -EINVAL;
+	}
+
+	if (msm_host->ice.vops->suspend) {
+		err = msm_host->ice.vops->suspend(msm_host->ice.pdev);
+		if (err) {
+			pr_err("%s: ice suspend failed %d\n",
+					mmc_hostname(host->mmc), err);
+			return -EINVAL;
+		}
+	}
+	msm_host->ice.state = SDHCI_MSM_ICE_STATE_SUSPENDED;
+	return 0;
+}
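
The suspend/resume paths above enforce a small state machine: suspend is only legal from ACTIVE, resume only from SUSPENDED. A toy model of those transitions (illustrative only):

#include <stdio.h>

enum ice_state { ICE_DISABLED, ICE_ACTIVE, ICE_SUSPENDED };

static int ice_suspend(enum ice_state *s)
{
	if (*s != ICE_ACTIVE)
		return -1;	/* -EINVAL in the driver */
	*s = ICE_SUSPENDED;
	return 0;
}

static int ice_resume(enum ice_state *s)
{
	if (*s != ICE_SUSPENDED)
		return -1;
	*s = ICE_ACTIVE;
	return 0;
}

int main(void)
{
	enum ice_state s = ICE_ACTIVE;

	printf("suspend: %d\n", ice_suspend(&s));	/* 0 */
	printf("suspend again: %d\n", ice_suspend(&s));	/* -1 */
	printf("resume: %d\n", ice_resume(&s));		/* 0 */
	return 0;
}
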
+
+int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int stat = -EINVAL;
+
+	if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+		pr_err("%s: ice is in invalid state %d\n",
+			mmc_hostname(host->mmc), msm_host->ice.state);
+		return -EINVAL;
+	}
+
+	if (msm_host->ice.vops->status) {
+		*ice_status = 0;
+		stat = msm_host->ice.vops->status(msm_host->ice.pdev);
+		if (stat < 0) {
+			pr_err("%s: ice get sts failed %d\n",
+					mmc_hostname(host->mmc), stat);
+			return -EINVAL;
+		}
+		*ice_status = stat;
+	}
+	return 0;
+}
+
+void sdhci_msm_ice_print_regs(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	if (msm_host->ice.vops->debug)
+		msm_host->ice.vops->debug(msm_host->ice.pdev);
+}
diff --git a/drivers/mmc/host/sdhci-msm-ice.h b/drivers/mmc/host/sdhci-msm-ice.h
new file mode 100644
index 0000000..a722db6
--- /dev/null
+++ b/drivers/mmc/host/sdhci-msm-ice.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SDHCI_MSM_ICE_H__
+#define __SDHCI_MSM_ICE_H__
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/blkdev.h>
+//#include <crypto/ice.h>
+
+#include "sdhci-msm.h"
+
+#define SDHC_MSM_CRYPTO_LABEL "sdhc-msm-crypto"
+/* Timeout for ICE initialization, which requires TZ access */
+#define SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS	500
+
+/*
+ * SDHCI host controller ICE registers. There are 32 instances
+ * (n = 0..31) of each of these registers.
+ */
+#define NUM_SDHCI_MSM_ICE_CTRL_INFO_n_REGS	32
+
+#define CORE_VENDOR_SPEC_ICE_CTRL		0x300
+#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n	0x304
+#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n	0x308
+#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n	0x30C
+
+/* ICE3.0 registers added in the CMDQ register space */
+#define ICE_CQ_CAPABILITIES	0x04
+#define ICE_HCI_SUPPORT		(1 << 28)
+#define ICE_CQ_CONFIG		0x08
+#define CRYPTO_GENERAL_ENABLE	(1 << 1)
+#define ICE_NONCQ_CRYPTO_PARAMS	0x70
+#define ICE_NONCQ_CRYPTO_DUN	0x74
+
+/* ICE3.0 registers added in the HC register space */
+#define HC_VENDOR_SPECIFIC_FUNC4	0x260
+#define DISABLE_CRYPTO			(1 << 15)
+#define HC_VENDOR_SPECIFIC_ICE_CTRL	0x800
+#define ICE_SW_RST_EN			(1 << 0)
+
+/* SDHCI MSM ICE CTRL Info register offset */
+enum {
+	OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS     = 0,
+	OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX  = 1,
+	OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU        = 6,
+	OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI	  = 0,
+	OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE	  = 8,
+};
+
+/* SDHCI MSM ICE CTRL Info register masks */
+enum {
+	MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS     = 0x1,
+	MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX  = 0x1F,
+	MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU        = 0x7,
+	MASK_SDHCI_MSM_ICE_HCI_PARAM_CE		= 0x1,
+	MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI	= 0xff
+};
+
+/* SDHCI MSM ICE encryption/decryption bypass state */
+enum {
+	SDHCI_MSM_ICE_DISABLE_BYPASS  = 0,
+	SDHCI_MSM_ICE_ENABLE_BYPASS = 1,
+};
+
+/* SDHCI MSM ICE crypto data unit sizes for the DUN of a transfer request */
+enum {
+	SDHCI_MSM_ICE_TR_DATA_UNIT_512_B          = 0,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_1_KB           = 1,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_2_KB           = 2,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB           = 3,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_8_KB           = 4,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_16_KB          = 5,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_32_KB          = 6,
+	SDHCI_MSM_ICE_TR_DATA_UNIT_64_KB          = 7,
+};
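
The CDU encoding above is log2(unit size / 512), so 512 B maps to 0 and 64 KB to 7. A small self-contained check (illustrative only):

#include <stdio.h>

/* Returns the 3-bit CDU code for a data unit size, or -1 if invalid. */
static int cdu_code(unsigned int bytes)
{
	int code = 0;

	if (bytes < 512 || (bytes & (bytes - 1)))
		return -1;	/* must be a power of two >= 512 */
	for (bytes /= 512; bytes > 1; bytes >>= 1)
		code++;
	return code > 7 ? -1 : code;
}

int main(void)
{
	printf("4096 -> %d, 512 -> %d\n", cdu_code(4096), cdu_code(512));
	return 0;	/* prints "4096 -> 3, 512 -> 0" */
}
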
+
+/* SDHCI MSM ICE internal state */
+enum {
+	SDHCI_MSM_ICE_STATE_DISABLED   = 0,
+	SDHCI_MSM_ICE_STATE_ACTIVE     = 1,
+	SDHCI_MSM_ICE_STATE_SUSPENDED  = 2,
+};
+
+/* crypto context fields in cmdq data command task descriptor */
+#define DATA_UNIT_NUM(x)	(((u64)(x) & 0xFFFFFFFF) << 0)
+#define CRYPTO_CONFIG_INDEX(x)	(((u64)(x) & 0xFF) << 32)
+#define CRYPTO_ENABLE(x)	(((u64)(x) & 0x1) << 47)
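
Together these macros build the 64-bit crypto context stored in the CMDQ task descriptor: DUN in bits 31:0, CCI in bits 39:32, and CE in bit 47. A self-contained check of the packing, with the macros mirrored onto userspace types (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define DATA_UNIT_NUM(x)	(((uint64_t)(x) & 0xFFFFFFFF) << 0)
#define CRYPTO_CONFIG_INDEX(x)	(((uint64_t)(x) & 0xFF) << 32)
#define CRYPTO_ENABLE(x)	(((uint64_t)(x) & 0x1) << 47)

int main(void)
{
	uint64_t dun = 0x1000;	/* hypothetical data unit number */
	short key_index = 3;
	unsigned int bypass = 0;

	uint64_t ice_ctx = DATA_UNIT_NUM(dun) |
			   CRYPTO_CONFIG_INDEX(key_index) |
			   CRYPTO_ENABLE(!bypass);

	/* 0x0000800300001000: CE in bit 47, CCI=3 in 39:32, DUN=0x1000 */
	printf("ice_ctx = 0x%016llx\n", (unsigned long long)ice_ctx);
	return 0;
}
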
+
+#ifdef CONFIG_MMC_SDHCI_MSM_ICE
+int sdhci_msm_ice_get_dev(struct sdhci_host *host);
+int sdhci_msm_ice_init(struct sdhci_host *host);
+void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot);
+int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+			u32 slot);
+int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
+			struct mmc_request *mrq, u32 slot, u64 *ice_ctx);
+int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq);
+int sdhci_msm_ice_reset(struct sdhci_host *host);
+int sdhci_msm_ice_resume(struct sdhci_host *host);
+int sdhci_msm_ice_suspend(struct sdhci_host *host);
+int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status);
+void sdhci_msm_ice_print_regs(struct sdhci_host *host);
+#else
+static inline int sdhci_msm_ice_get_dev(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	if (msm_host) {
+		msm_host->ice.pdev = NULL;
+		msm_host->ice.vops = NULL;
+	}
+	return -ENODEV;
+}
+static inline int sdhci_msm_ice_init(struct sdhci_host *host)
+{
+	return 0;
+}
+
+static inline void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot)
+{
+}
+
+static inline int sdhci_msm_ice_cfg(struct sdhci_host *host,
+		struct mmc_request *mrq, u32 slot)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
+		struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_cfg_end(struct sdhci_host *host,
+			struct mmc_request *mrq)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_reset(struct sdhci_host *host)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_resume(struct sdhci_host *host)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_suspend(struct sdhci_host *host)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_get_status(struct sdhci_host *host,
+				   int *ice_status)
+{
+	return 0;
+}
+static inline void sdhci_msm_ice_print_regs(struct sdhci_host *host)
+{
+}
+#endif /* CONFIG_MMC_SDHCI_MSM_ICE */
+#endif /* __SDHCI_MSM_ICE_H__ */
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 3cc8bfe..a82a8cb 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1,91 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
- *
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/gfp.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/delay.h>
-#include <linux/mmc/mmc.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/iopoll.h>
+#include <linux/of_gpio.h>
 #include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_runtime.h>
+#include <trace/events/mmc.h>
 
-#include "sdhci-pltfm.h"
+#include "sdhci-msm.h"
+#include "sdhci-msm-ice.h"
 
-#define CORE_MCI_VERSION		0x50
-#define CORE_VERSION_MAJOR_SHIFT	28
-#define CORE_VERSION_MAJOR_MASK		(0xf << CORE_VERSION_MAJOR_SHIFT)
-#define CORE_VERSION_MINOR_MASK		0xff
-
-#define CORE_MCI_GENERICS		0x70
-#define SWITCHABLE_SIGNALING_VOLTAGE	BIT(29)
-
-#define HC_MODE_EN		0x1
+#define QOS_REMOVE_DELAY_MS	10
 #define CORE_POWER		0x0
-#define CORE_SW_RST		BIT(7)
-#define FF_CLK_SW_RST_DIS	BIT(13)
+#define CORE_SW_RST		(1 << 7)
 
-#define CORE_PWRCTL_BUS_OFF	BIT(0)
-#define CORE_PWRCTL_BUS_ON	BIT(1)
-#define CORE_PWRCTL_IO_LOW	BIT(2)
-#define CORE_PWRCTL_IO_HIGH	BIT(3)
-#define CORE_PWRCTL_BUS_SUCCESS BIT(0)
-#define CORE_PWRCTL_IO_SUCCESS	BIT(2)
-#define REQ_BUS_OFF		BIT(0)
-#define REQ_BUS_ON		BIT(1)
-#define REQ_IO_LOW		BIT(2)
-#define REQ_IO_HIGH		BIT(3)
-#define INT_MASK		0xf
+#define SDHCI_VER_100		0x2B
+
+#define CORE_VERSION_STEP_MASK		0x0000FFFF
+#define CORE_VERSION_MINOR_MASK		0x0FFF0000
+#define CORE_VERSION_MINOR_SHIFT	16
+#define CORE_VERSION_MAJOR_MASK		0xF0000000
+#define CORE_VERSION_MAJOR_SHIFT	28
+#define CORE_VERSION_TARGET_MASK	0x000000FF
+#define SDHCI_MSM_VER_420               0x49
+
+#define SWITCHABLE_SIGNALLING_VOL	(1 << 29)
+
+#define CORE_HC_MODE		0x78
+#define HC_MODE_EN		0x1
+#define FF_CLK_SW_RST_DIS	(1 << 13)
+
+#define CORE_PWRCTL_BUS_OFF	0x01
+#define CORE_PWRCTL_BUS_ON	(1 << 1)
+#define CORE_PWRCTL_IO_LOW	(1 << 2)
+#define CORE_PWRCTL_IO_HIGH	(1 << 3)
+
+#define CORE_PWRCTL_BUS_SUCCESS	0x01
+#define CORE_PWRCTL_BUS_FAIL	(1 << 1)
+#define CORE_PWRCTL_IO_SUCCESS	(1 << 2)
+#define CORE_PWRCTL_IO_FAIL	(1 << 3)
+
+#define INT_MASK		0xF
 #define MAX_PHASES		16
-#define CORE_DLL_LOCK		BIT(7)
-#define CORE_DDR_DLL_LOCK	BIT(11)
-#define CORE_DLL_EN		BIT(16)
-#define CORE_CDR_EN		BIT(17)
-#define CORE_CK_OUT_EN		BIT(18)
-#define CORE_CDR_EXT_EN		BIT(19)
-#define CORE_DLL_PDN		BIT(29)
-#define CORE_DLL_RST		BIT(30)
-#define CORE_CMD_DAT_TRACK_SEL	BIT(0)
 
-#define CORE_DDR_CAL_EN		BIT(0)
-#define CORE_FLL_CYCLE_CNT	BIT(18)
-#define CORE_DLL_CLOCK_DISABLE	BIT(21)
+#define CORE_CMD_DAT_TRACK_SEL	(1 << 0)
+#define CORE_DLL_EN		(1 << 16)
+#define CORE_CDR_EN		(1 << 17)
+#define CORE_CK_OUT_EN		(1 << 18)
+#define CORE_CDR_EXT_EN		(1 << 19)
+#define CORE_DLL_PDN		(1 << 29)
+#define CORE_DLL_RST		(1 << 30)
 
-#define CORE_VENDOR_SPEC_POR_VAL 0xa1c
-#define CORE_CLK_PWRSAVE	BIT(1)
-#define CORE_HC_MCLK_SEL_DFLT	(2 << 8)
-#define CORE_HC_MCLK_SEL_HS400	(3 << 8)
-#define CORE_HC_MCLK_SEL_MASK	(3 << 8)
+#define CORE_DLL_LOCK		(1 << 7)
+#define CORE_DDR_DLL_LOCK	(1 << 11)
+
+#define CORE_CLK_PWRSAVE		(1 << 1)
+#define CORE_VNDR_SPEC_ADMA_ERR_SIZE_EN	(1 << 7)
+#define CORE_HC_MCLK_SEL_DFLT		(2 << 8)
+#define CORE_HC_MCLK_SEL_HS400		(3 << 8)
+#define CORE_HC_MCLK_SEL_MASK		(3 << 8)
+#define CORE_HC_AUTO_CMD21_EN		(1 << 6)
 #define CORE_IO_PAD_PWR_SWITCH_EN	(1 << 15)
-#define CORE_IO_PAD_PWR_SWITCH  (1 << 16)
-#define CORE_HC_SELECT_IN_EN	BIT(18)
+#define CORE_IO_PAD_PWR_SWITCH	(1 << 16)
+#define CORE_HC_SELECT_IN_EN	(1 << 18)
 #define CORE_HC_SELECT_IN_HS400	(6 << 19)
 #define CORE_HC_SELECT_IN_MASK	(7 << 19)
+#define CORE_VENDOR_SPEC_POR_VAL	0xA1C
 
-#define CORE_3_0V_SUPPORT	(1 << 25)
-#define CORE_1_8V_SUPPORT	(1 << 26)
-#define CORE_VOLT_SUPPORT	(CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)
+#define HC_SW_RST_WAIT_IDLE_DIS	(1 << 20)
+#define HC_SW_RST_REQ (1 << 21)
+#define CORE_ONE_MID_EN     (1 << 25)
+
+#define CORE_8_BIT_SUPPORT		(1 << 18)
+#define CORE_3_3V_SUPPORT		(1 << 24)
+#define CORE_3_0V_SUPPORT		(1 << 25)
+#define CORE_1_8V_SUPPORT		(1 << 26)
+#define CORE_SYS_BUS_SUPPORT_64_BIT	BIT(28)
 
 #define CORE_CSR_CDC_CTLR_CFG0		0x130
-#define CORE_SW_TRIG_FULL_CALIB		BIT(16)
-#define CORE_HW_AUTOCAL_ENA		BIT(17)
+#define CORE_SW_TRIG_FULL_CALIB		(1 << 16)
+#define CORE_HW_AUTOCAL_ENA		(1 << 17)
 
 #define CORE_CSR_CDC_CTLR_CFG1		0x134
 #define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
-#define CORE_TIMER_ENA			BIT(16)
+#define CORE_TIMER_ENA			(1 << 16)
 
 #define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
 #define CORE_CSR_CDC_REFCOUNT_CFG	0x140
@@ -94,305 +117,397 @@
 #define CORE_CSR_CDC_DELAY_CFG		0x150
 #define CORE_CDC_SLAVE_DDA_CFG		0x160
 #define CORE_CSR_CDC_STATUS0		0x164
-#define CORE_CALIBRATION_DONE		BIT(0)
+#define CORE_CALIBRATION_DONE		(1 << 0)
 
 #define CORE_CDC_ERROR_CODE_MASK	0x7000000
 
+#define CQ_CMD_DBG_RAM	                0x110
+#define CQ_CMD_DBG_RAM_WA               0x150
+#define CQ_CMD_DBG_RAM_OL               0x154
+
 #define CORE_CSR_CDC_GEN_CFG		0x178
-#define CORE_CDC_SWITCH_BYPASS_OFF	BIT(0)
-#define CORE_CDC_SWITCH_RC_EN		BIT(1)
+#define CORE_CDC_SWITCH_BYPASS_OFF	(1 << 0)
+#define CORE_CDC_SWITCH_RC_EN		(1 << 1)
 
-#define CORE_CDC_T4_DLY_SEL		BIT(0)
-#define CORE_CMDIN_RCLK_EN		BIT(1)
-#define CORE_START_CDC_TRAFFIC		BIT(6)
+#define CORE_CDC_T4_DLY_SEL		(1 << 0)
+#define CORE_CMDIN_RCLK_EN		(1 << 1)
+#define CORE_START_CDC_TRAFFIC		(1 << 6)
 
-#define CORE_PWRSAVE_DLL	BIT(3)
+#define CORE_PWRSAVE_DLL	(1 << 3)
+#define CORE_FIFO_ALT_EN	(1 << 10)
+#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
 
-#define DDR_CONFIG_POR_VAL	0x80040853
+#define CORE_DDR_CAL_EN		(1 << 0)
+#define CORE_FLL_CYCLE_CNT	(1 << 18)
+#define CORE_DLL_CLOCK_DISABLE	(1 << 21)
 
+#define DDR_CONFIG_POR_VAL		0x80040873
+#define DLL_USR_CTL_POR_VAL		0x10800
+#define ENABLE_DLL_LOCK_STATUS		(1 << 26)
+#define FINE_TUNE_MODE_EN		(1 << 27)
+#define BIAS_OK_SIGNAL			(1 << 29)
+#define DLL_CONFIG_3_POR_VAL		0x10
+
+/* 512 descriptors */
+#define SDHCI_MSM_MAX_SEGMENTS  (1 << 9)
+#define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */
+
+#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)
+#define TCXO_FREQ		19200000
 
 #define INVALID_TUNING_PHASE	-1
-#define SDHCI_MSM_MIN_CLOCK	400000
-#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)
+#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
 
-#define CDR_SELEXT_SHIFT	20
-#define CDR_SELEXT_MASK		(0xf << CDR_SELEXT_SHIFT)
-#define CMUX_SHIFT_PHASE_SHIFT	24
-#define CMUX_SHIFT_PHASE_MASK	(7 << CMUX_SHIFT_PHASE_SHIFT)
+#define NUM_TUNING_PHASES		16
+#define MAX_DRV_TYPES_SUPPORTED_HS200	4
+#define MSM_AUTOSUSPEND_DELAY_MS 100
 
-#define MSM_MMC_AUTOSUSPEND_DELAY_MS	50
+struct sdhci_msm_offset {
+	u32 CORE_MCI_DATA_CNT;
+	u32 CORE_MCI_STATUS;
+	u32 CORE_MCI_FIFO_CNT;
+	u32 CORE_MCI_VERSION;
+	u32 CORE_GENERICS;
+	u32 CORE_TESTBUS_CONFIG;
+	u32 CORE_TESTBUS_SEL2_BIT;
+	u32 CORE_TESTBUS_ENA;
+	u32 CORE_TESTBUS_SEL2;
+	u32 CORE_PWRCTL_STATUS;
+	u32 CORE_PWRCTL_MASK;
+	u32 CORE_PWRCTL_CLEAR;
+	u32 CORE_PWRCTL_CTL;
+	u32 CORE_SDCC_DEBUG_REG;
+	u32 CORE_DLL_CONFIG;
+	u32 CORE_DLL_STATUS;
+	u32 CORE_VENDOR_SPEC;
+	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
+	u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
+	u32 CORE_VENDOR_SPEC_FUNC2;
+	u32 CORE_VENDOR_SPEC_CAPABILITIES0;
+	u32 CORE_DDR_200_CFG;
+	u32 CORE_VENDOR_SPEC3;
+	u32 CORE_DLL_CONFIG_2;
+	u32 CORE_DLL_CONFIG_3;
+	u32 CORE_DDR_CONFIG;
+	u32 CORE_DDR_CONFIG_OLD; /* Applicable to sdcc minor ver < 0x49 only */
+	u32 CORE_DLL_USR_CTL; /* Present on SDCC5.1 onwards */
+};
+
+struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
+	.CORE_MCI_DATA_CNT = 0x35C,
+	.CORE_MCI_STATUS = 0x324,
+	.CORE_MCI_FIFO_CNT = 0x308,
+	.CORE_MCI_VERSION = 0x318,
+	.CORE_GENERICS = 0x320,
+	.CORE_TESTBUS_CONFIG = 0x32C,
+	.CORE_TESTBUS_SEL2_BIT = 3,
+	.CORE_TESTBUS_ENA = (1 << 31),
+	.CORE_TESTBUS_SEL2 = (1 << 3),
+	.CORE_PWRCTL_STATUS = 0x240,
+	.CORE_PWRCTL_MASK = 0x244,
+	.CORE_PWRCTL_CLEAR = 0x248,
+	.CORE_PWRCTL_CTL = 0x24C,
+	.CORE_SDCC_DEBUG_REG = 0x358,
+	.CORE_DLL_CONFIG = 0x200,
+	.CORE_DLL_STATUS = 0x208,
+	.CORE_VENDOR_SPEC = 0x20C,
+	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
+	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
+	.CORE_VENDOR_SPEC_FUNC2 = 0x210,
+	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
+	.CORE_DDR_200_CFG = 0x224,
+	.CORE_VENDOR_SPEC3 = 0x250,
+	.CORE_DLL_CONFIG_2 = 0x254,
+	.CORE_DLL_CONFIG_3 = 0x258,
+	.CORE_DDR_CONFIG = 0x25C,
+	.CORE_DLL_USR_CTL = 0x388,
+};
+
+struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
+	.CORE_MCI_DATA_CNT = 0x30,
+	.CORE_MCI_STATUS = 0x34,
+	.CORE_MCI_FIFO_CNT = 0x44,
+	.CORE_MCI_VERSION = 0x050,
+	.CORE_GENERICS = 0x70,
+	.CORE_TESTBUS_CONFIG = 0x0CC,
+	.CORE_TESTBUS_SEL2_BIT = 4,
+	.CORE_TESTBUS_ENA = (1 << 3),
+	.CORE_TESTBUS_SEL2 = (1 << 4),
+	.CORE_PWRCTL_STATUS = 0xDC,
+	.CORE_PWRCTL_MASK = 0xE0,
+	.CORE_PWRCTL_CLEAR = 0xE4,
+	.CORE_PWRCTL_CTL = 0xE8,
+	.CORE_SDCC_DEBUG_REG = 0x124,
+	.CORE_DLL_CONFIG = 0x100,
+	.CORE_DLL_STATUS = 0x108,
+	.CORE_VENDOR_SPEC = 0x10C,
+	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
+	.CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
+	.CORE_VENDOR_SPEC_FUNC2 = 0x110,
+	.CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
+	.CORE_DDR_200_CFG = 0x184,
+	.CORE_VENDOR_SPEC3 = 0x1B0,
+	.CORE_DLL_CONFIG_2 = 0x1B4,
+	.CORE_DLL_CONFIG_3 = 0x1B8,
+	.CORE_DDR_CONFIG_OLD = 0x1B8, /* Applicable to sdcc minor ver < 0x49 */
+	.CORE_DDR_CONFIG = 0x1BC,
+};
+
+u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	void __iomem *base_addr;
+
+	if (msm_host->mci_removed)
+		base_addr = host->ioaddr;
+	else
+		base_addr = msm_host->core_mem;
+
+	return readb_relaxed(base_addr + offset);
+}
+
+u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	void __iomem *base_addr;
+
+	if (msm_host->mci_removed)
+		base_addr = host->ioaddr;
+	else
+		base_addr = msm_host->core_mem;
+
+	return readl_relaxed(base_addr + offset);
+}
+
+void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	void __iomem *base_addr;
+
+	if (msm_host->mci_removed)
+		base_addr = host->ioaddr;
+	else
+		base_addr = msm_host->core_mem;
+
+	writeb_relaxed(val, base_addr + offset);
+}
+
+void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	void __iomem *base_addr;
+
+	if (msm_host->mci_removed)
+		base_addr = host->ioaddr;
+	else
+		base_addr = msm_host->core_mem;
+
+	writel_relaxed(val, base_addr + offset);
+}
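
All four accessors above encode the same dispatch: once the MCI block is removed (V5 onwards) these registers live in the standard SDHC region (host->ioaddr), otherwise in the legacy core_mem region. A toy model of that base-address selection, with plain arrays standing in for the two MMIO regions (illustrative only):

#include <stdint.h>
#include <stdio.h>

struct fake_host {
	int mci_removed;
	uint32_t core_mem[64];	/* stands in for the legacy MCI region */
	uint32_t ioaddr[64];	/* stands in for the SDHC region */
};

static uint32_t fake_readl(struct fake_host *h, unsigned int off)
{
	uint32_t *base = h->mci_removed ? h->ioaddr : h->core_mem;

	return base[off / 4];
}

int main(void)
{
	struct fake_host h = { .mci_removed = 1 };

	h.ioaddr[0x10 / 4] = 0xdead;
	printf("0x%x\n", fake_readl(&h, 0x10));	/* 0xdead */
	return 0;
}
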
 
 /* Timeout value to avoid infinite waiting for pwr_irq */
 #define MSM_PWR_IRQ_TIMEOUT_MS 5000
 
-#define msm_host_readl(msm_host, host, offset) \
-	msm_host->var_ops->msm_readl_relaxed(host, offset)
-
-#define msm_host_writel(msm_host, val, host, offset) \
-	msm_host->var_ops->msm_writel_relaxed(val, host, offset)
-
-struct sdhci_msm_offset {
-	u32 core_hc_mode;
-	u32 core_mci_data_cnt;
-	u32 core_mci_status;
-	u32 core_mci_fifo_cnt;
-	u32 core_mci_version;
-	u32 core_generics;
-	u32 core_testbus_config;
-	u32 core_testbus_sel2_bit;
-	u32 core_testbus_ena;
-	u32 core_testbus_sel2;
-	u32 core_pwrctl_status;
-	u32 core_pwrctl_mask;
-	u32 core_pwrctl_clear;
-	u32 core_pwrctl_ctl;
-	u32 core_sdcc_debug_reg;
-	u32 core_dll_config;
-	u32 core_dll_status;
-	u32 core_vendor_spec;
-	u32 core_vendor_spec_adma_err_addr0;
-	u32 core_vendor_spec_adma_err_addr1;
-	u32 core_vendor_spec_func2;
-	u32 core_vendor_spec_capabilities0;
-	u32 core_ddr_200_cfg;
-	u32 core_vendor_spec3;
-	u32 core_dll_config_2;
-	u32 core_ddr_config;
-	u32 core_ddr_config_2;
+static const u32 tuning_block_64[] = {
+	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
+	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
+	0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
+	0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
 };
 
-static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
-	.core_mci_data_cnt = 0x35c,
-	.core_mci_status = 0x324,
-	.core_mci_fifo_cnt = 0x308,
-	.core_mci_version = 0x318,
-	.core_generics = 0x320,
-	.core_testbus_config = 0x32c,
-	.core_testbus_sel2_bit = 3,
-	.core_testbus_ena = (1 << 31),
-	.core_testbus_sel2 = (1 << 3),
-	.core_pwrctl_status = 0x240,
-	.core_pwrctl_mask = 0x244,
-	.core_pwrctl_clear = 0x248,
-	.core_pwrctl_ctl = 0x24c,
-	.core_sdcc_debug_reg = 0x358,
-	.core_dll_config = 0x200,
-	.core_dll_status = 0x208,
-	.core_vendor_spec = 0x20c,
-	.core_vendor_spec_adma_err_addr0 = 0x214,
-	.core_vendor_spec_adma_err_addr1 = 0x218,
-	.core_vendor_spec_func2 = 0x210,
-	.core_vendor_spec_capabilities0 = 0x21c,
-	.core_ddr_200_cfg = 0x224,
-	.core_vendor_spec3 = 0x250,
-	.core_dll_config_2 = 0x254,
-	.core_ddr_config = 0x258,
-	.core_ddr_config_2 = 0x25c,
+static const u32 tuning_block_128[] = {
+	0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
+	0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
+	0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
+	0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
+	0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
+	0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
+	0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
+	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
 };
 
-static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
-	.core_hc_mode = 0x78,
-	.core_mci_data_cnt = 0x30,
-	.core_mci_status = 0x34,
-	.core_mci_fifo_cnt = 0x44,
-	.core_mci_version = 0x050,
-	.core_generics = 0x70,
-	.core_testbus_config = 0x0cc,
-	.core_testbus_sel2_bit = 4,
-	.core_testbus_ena = (1 << 3),
-	.core_testbus_sel2 = (1 << 4),
-	.core_pwrctl_status = 0xdc,
-	.core_pwrctl_mask = 0xe0,
-	.core_pwrctl_clear = 0xe4,
-	.core_pwrctl_ctl = 0xe8,
-	.core_sdcc_debug_reg = 0x124,
-	.core_dll_config = 0x100,
-	.core_dll_status = 0x108,
-	.core_vendor_spec = 0x10c,
-	.core_vendor_spec_adma_err_addr0 = 0x114,
-	.core_vendor_spec_adma_err_addr1 = 0x118,
-	.core_vendor_spec_func2 = 0x110,
-	.core_vendor_spec_capabilities0 = 0x11c,
-	.core_ddr_200_cfg = 0x184,
-	.core_vendor_spec3 = 0x1b0,
-	.core_dll_config_2 = 0x1b4,
-	.core_ddr_config = 0x1b8,
-	.core_ddr_config_2 = 0x1bc,
-};
+/* global to hold each slot instance for debug */
+static struct sdhci_msm_host *sdhci_slot[2];
 
-struct sdhci_msm_variant_ops {
-	u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
-	void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
-			u32 offset);
-};
+static int disable_slots;
+/* root can write, others read */
+module_param(disable_slots, int, 0644);
 
-/*
- * From V5, register spaces have changed. Wrap this info in a structure
- * and choose the data_structure based on version info mentioned in DT.
- */
-struct sdhci_msm_variant_info {
-	bool mci_removed;
-	const struct sdhci_msm_variant_ops *var_ops;
-	const struct sdhci_msm_offset *offset;
-};
-
-struct sdhci_msm_host {
-	struct platform_device *pdev;
-	void __iomem *core_mem;	/* MSM SDCC mapped address */
-	int pwr_irq;		/* power irq */
-	struct clk *bus_clk;	/* SDHC bus voter clock */
-	struct clk *xo_clk;	/* TCXO clk needed for FLL feature of cm_dll*/
-	struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
-	unsigned long clk_rate;
-	struct mmc_host *mmc;
-	bool use_14lpp_dll_reset;
-	bool tuning_done;
-	bool calibration_done;
-	u8 saved_tuning_phase;
-	bool use_cdclp533;
-	u32 curr_pwr_state;
-	u32 curr_io_level;
-	wait_queue_head_t pwr_irq_wait;
-	bool pwr_irq_flag;
-	u32 caps_0;
-	bool mci_removed;
-	const struct sdhci_msm_variant_ops *var_ops;
-	const struct sdhci_msm_offset *offset;
-};
-
-static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
-{
-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-
-	return msm_host->offset;
-}
-
-/*
- * APIs to read/write to vendor specific registers which were there in the
- * core_mem region before MCI was removed.
- */
-static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
-		u32 offset)
-{
-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-
-	return readl_relaxed(msm_host->core_mem + offset);
-}
-
-static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
-		u32 offset)
-{
-	return readl_relaxed(host->ioaddr + offset);
-}
-
-static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
-		struct sdhci_host *host, u32 offset)
-{
-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-
-	writel_relaxed(val, msm_host->core_mem + offset);
-}
-
-static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
-		struct sdhci_host *host, u32 offset)
-{
-	writel_relaxed(val, host->ioaddr + offset);
-}
-
-static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
-						    unsigned int clock)
-{
-	struct mmc_ios ios = host->mmc->ios;
+enum vdd_io_level {
+	/* set vdd_io_data->low_vol_level */
+	VDD_IO_LOW,
+	/* set vdd_io_data->high_vol_level */
+	VDD_IO_HIGH,
 	/*
-	 * The SDHC requires internal clock frequency to be double the
-	 * actual clock that will be set for DDR mode. The controller
-	 * uses the faster clock(100/400MHz) for some of its parts and
-	 * send the actual required clock (50/200MHz) to the card.
+	 * set to whatever is passed in voltage_level (the third
+	 * argument) of sdhci_msm_set_vdd_io_vol().
 	 */
-	if (ios.timing == MMC_TIMING_UHS_DDR50 ||
-	    ios.timing == MMC_TIMING_MMC_DDR52 ||
-	    ios.timing == MMC_TIMING_MMC_HS400 ||
-	    host->flags & SDHCI_HS400_TUNING)
-		clock *= 2;
-	return clock;
-}
+	VDD_IO_SET_LEVEL,
+};
 
-static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
-					    unsigned int clock)
+enum dll_init_context {
+	DLL_INIT_NORMAL = 0,
+	DLL_INIT_FROM_CX_COLLAPSE_EXIT,
+};
+
+/* MSM platform specific tuning */
+static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
+						u8 poll)
 {
-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	struct mmc_ios curr_ios = host->mmc->ios;
-	struct clk *core_clk = msm_host->bulk_clks[0].clk;
-	int rc;
-
-	clock = msm_get_clock_rate_for_bus_mode(host, clock);
-	rc = clk_set_rate(core_clk, clock);
-	if (rc) {
-		pr_err("%s: Failed to set clock at rate %u at timing %d\n",
-		       mmc_hostname(host->mmc), clock,
-		       curr_ios.timing);
-		return;
-	}
-	msm_host->clk_rate = clock;
-	pr_debug("%s: Setting clock at rate %lu at timing %d\n",
-		 mmc_hostname(host->mmc), clk_get_rate(core_clk),
-		 curr_ios.timing);
-}
-
-/* Platform specific tuning */
-static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
-{
+	int rc = 0;
 	u32 wait_cnt = 50;
-	u8 ck_out_en;
+	u8 ck_out_en = 0;
 	struct mmc_host *mmc = host->mmc;
-	const struct sdhci_msm_offset *msm_offset =
-					sdhci_priv_msm_offset(host);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
 
-	/* Poll for CK_OUT_EN bit.  max. poll time = 50us */
+	/* poll for CK_OUT_EN bit.  max. poll time = 50us */
 	ck_out_en = !!(readl_relaxed(host->ioaddr +
-			msm_offset->core_dll_config) & CORE_CK_OUT_EN);
+		msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
 
 	while (ck_out_en != poll) {
 		if (--wait_cnt == 0) {
-			dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
-			       mmc_hostname(mmc), poll);
-			return -ETIMEDOUT;
+			pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
+				mmc_hostname(mmc), __func__, poll);
+			rc = -ETIMEDOUT;
+			goto out;
 		}
 		udelay(1);
 
 		ck_out_en = !!(readl_relaxed(host->ioaddr +
-			msm_offset->core_dll_config) & CORE_CK_OUT_EN);
+			msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
 	}
+out:
+	return rc;
+}
 
-	return 0;
+/*
+ * Enable CDR to track changes of DAT lines and adjust sampling
+ * point according to voltage/temperature variations
+ */
+static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
+{
+	int rc = 0;
+	u32 config;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+
+	config = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
+	config |= CORE_CDR_EN;
+	config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
+	writel_relaxed(config, host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
+
+	rc = msm_dll_poll_ck_out_en(host, 0);
+	if (rc)
+		goto err;
+
+	writel_relaxed((readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
+		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+
+	rc = msm_dll_poll_ck_out_en(host, 1);
+	if (rc)
+		goto err;
+	goto out;
+err:
+	pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
+out:
+	return rc;
+}
+
+static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
+				*attr, const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	u32 tmp;
+	unsigned long flags;
+
+	if (!kstrtou32(buf, 0, &tmp)) {
+		spin_lock_irqsave(&host->lock, flags);
+		msm_host->en_auto_cmd21 = !!tmp;
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+	return count;
+}
+
+static ssize_t show_auto_cmd21(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
+}
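
These two handlers have the standard sysfs show/store signatures, so elsewhere the patch presumably binds them to a device attribute along these lines (a sketch; the attribute name and mode here are assumptions):

/* Hypothetical wiring, typically near the driver's probe path: */
static DEVICE_ATTR(auto_cmd21, 0644, show_auto_cmd21, store_auto_cmd21);
/* ... device_create_file(&pdev->dev, &dev_attr_auto_cmd21); */
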
+
+/* MSM auto-tuning handler */
+static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
+					    bool enable,
+					    u32 type)
+{
+	int rc = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u32 val = 0;
+
+	if (!msm_host->en_auto_cmd21)
+		return 0;
+
+	if (type == MMC_SEND_TUNING_BLOCK_HS200)
+		val = CORE_HC_AUTO_CMD21_EN;
+	else
+		return 0;
+
+	if (enable) {
+		rc = msm_enable_cdr_cm_sdc4_dll(host);
+		writel_relaxed(readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) | val,
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+	} else {
+		writel_relaxed(readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) & ~val,
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+	}
+	return rc;
 }
 
 static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
 {
-	int rc;
-	static const u8 grey_coded_phase_table[] = {
-		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
-		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
-	};
+	int rc = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u8 grey_coded_phase_table[] = { 0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
+					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9, 0x8 };
 	unsigned long flags;
 	u32 config;
 	struct mmc_host *mmc = host->mmc;
-	const struct sdhci_msm_offset *msm_offset =
-					sdhci_priv_msm_offset(host);
 
-	if (phase > 0xf)
-		return -EINVAL;
-
+	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
 	spin_lock_irqsave(&host->lock, flags);
 
-	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
+	config = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
 	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
 	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
-	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
+	writel_relaxed(config, host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
 
 	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
 	rc = msm_dll_poll_ck_out_en(host, 0);
@@ -403,31 +518,36 @@
 	 * Write the selected DLL clock output phase (0 ... 15)
 	 * to CDR_SELEXT bit field of DLL_CONFIG register.
 	 */
-	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
-	config &= ~CDR_SELEXT_MASK;
-	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
+	writel_relaxed(((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG)
+			& ~(0xF << 20))
+			| (grey_coded_phase_table[phase] << 20)),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 
-	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
-	config |= CORE_CK_OUT_EN;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
+	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
+		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 
 	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
 	rc = msm_dll_poll_ck_out_en(host, 1);
 	if (rc)
 		goto err_out;
 
-	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
+	config = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
 	config |= CORE_CDR_EN;
 	config &= ~CORE_CDR_EXT_EN;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
+	writel_relaxed(config, host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
 	goto out;
 
 err_out:
-	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
-	       mmc_hostname(mmc), phase);
+	pr_err("%s: %s: Failed to set DLL phase: %d\n",
+		mmc_hostname(mmc), __func__, phase);
 out:
 	spin_unlock_irqrestore(&host->lock, flags);
+	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
 	return rc;
 }
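
The grey_coded_phase_table used above is the 4-bit binary-reflected Gray code; every entry is reproducible as n ^ (n >> 1). A one-line check (illustrative only):

#include <stdio.h>

int main(void)
{
	/* Regenerates the driver's grey_coded_phase_table entries */
	for (unsigned int n = 0; n < 16; n++)
		printf("phase %2u -> 0x%X\n", n, n ^ (n >> 1));
	return 0;
}
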
 
@@ -442,19 +562,20 @@
  */
 
 static int msm_find_most_appropriate_phase(struct sdhci_host *host,
-					   u8 *phase_table, u8 total_phases)
+				u8 *phase_table, u8 total_phases)
 {
 	int ret;
 	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
-	u8 phases_per_row[MAX_PHASES] = { 0 };
+	u8 phases_per_row[MAX_PHASES] = {0};
 	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
 	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
 	bool phase_0_found = false, phase_15_found = false;
 	struct mmc_host *mmc = host->mmc;
 
+	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
 	if (!total_phases || (total_phases > MAX_PHASES)) {
-		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
-		       mmc_hostname(mmc), total_phases);
+		pr_err("%s: %s: invalid argument: total_phases=%d\n",
+			mmc_hostname(mmc), __func__, total_phases);
 		return -EINVAL;
 	}
 
@@ -512,7 +633,7 @@
 		i = phases_15;
 		for (cnt = 0; cnt < phases_0; cnt++) {
 			ranges[phase_15_raw_index][i] =
-			    ranges[phase_0_raw_index][cnt];
+				ranges[phase_0_raw_index][cnt];
 			if (++i >= MAX_PHASES)
 				break;
 		}
@@ -528,26 +649,29 @@
 		}
 	}
 
-	i = (curr_max * 3) / 4;
+	i = ((curr_max * 3) / 4);
 	if (i)
 		i--;
 
-	ret = ranges[selected_row_index][i];
+	ret = (int)ranges[selected_row_index][i];
 
 	if (ret >= MAX_PHASES) {
 		ret = -EINVAL;
-		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
-		       mmc_hostname(mmc), ret);
+		pr_err("%s: %s: invalid phase selected=%d\n",
+			mmc_hostname(mmc), __func__, ret);
 	}
 
+	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
 	return ret;
 }
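
The selection rule implemented above reduces to: find the longest contiguous run of passing phases, treating phase 15 and phase 0 as adjacent, then pick the phase roughly three-quarters into that run, backed off by one. A standalone model, with the driver's row/column bookkeeping simplified away (illustrative only):

#include <stdio.h>

#define PHASES 16

int main(void)
{
	/* hypothetical tuning result: phases 13..15 and 0..5 passed */
	int ok[PHASES] = { 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1 };
	int best_len = 0, best_start = 0;

	for (int s = 0; s < PHASES; s++) {
		int len = 0;

		/* run length starting at s, wrapping 15 -> 0 */
		while (len < PHASES && ok[(s + len) % PHASES])
			len++;
		if (len > best_len) {
			best_len = len;
			best_start = s;
		}
	}

	/* index 3/4 into the run, backed off by one as in the driver */
	int i = (best_len * 3) / 4;

	if (i)
		i--;
	printf("selected phase = %d\n", (best_start + i) % PHASES);
	return 0;	/* prints "selected phase = 2" for this input */
}
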
 
 static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
 {
-	u32 mclk_freq = 0, config;
-	const struct sdhci_msm_offset *msm_offset =
-					sdhci_priv_msm_offset(host);
+	u32 mclk_freq = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
 
 	/* Program the MCLK value to MCLK_FREQ bit field */
 	if (host->clock <= 112000000)
@@ -564,307 +688,234 @@
 		mclk_freq = 5;
 	else if (host->clock <= 187000000)
 		mclk_freq = 6;
-	else if (host->clock <= 200000000)
+	else if (host->clock <= 208000000)
 		mclk_freq = 7;
 
-	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
-	config &= ~CMUX_SHIFT_PHASE_MASK;
-	config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
+	writel_relaxed(((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG)
+			& ~(7 << 24)) | (mclk_freq << 24)),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 }
 
-/* Initialize the DLL (Programmable Delay Line) */
-static int msm_init_cm_dll(struct sdhci_host *host)
+/* Initialize the DLL (Programmable Delay Line) */
+static int msm_init_cm_dll(struct sdhci_host *host,
+				enum dll_init_context init_context)
 {
-	struct mmc_host *mmc = host->mmc;
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	int wait_cnt = 50;
-	unsigned long flags;
-	u32 config;
-	const struct sdhci_msm_offset *msm_offset =
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
 					msm_host->offset;
+	struct mmc_host *mmc = host->mmc;
+	int rc = 0;
+	unsigned long flags;
+	u32 wait_cnt;
+	bool prev_pwrsave, curr_pwrsave;
 
+	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
 	spin_lock_irqsave(&host->lock, flags);
-
+	prev_pwrsave = !!(readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
+	curr_pwrsave = prev_pwrsave;
 	/*
 	 * Make sure that clock is always enabled when DLL
 	 * tuning is in progress. Keeping PWRSAVE ON may
-	 * turn off the clock.
+	 * turn off the clock. So let's disable the PWRSAVE
+	 * here and re-enable it once tuning is completed.
 	 */
-	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
-	config &= ~CORE_CLK_PWRSAVE;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
-
-	if (msm_host->use_14lpp_dll_reset) {
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_dll_config);
-		config &= ~CORE_CK_OUT_EN;
-		writel_relaxed(config, host->ioaddr +
-				msm_offset->core_dll_config);
-
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_dll_config_2);
-		config |= CORE_DLL_CLOCK_DISABLE;
-		writel_relaxed(config, host->ioaddr +
-				msm_offset->core_dll_config_2);
+	if (prev_pwrsave) {
+		writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC)
+			& ~CORE_CLK_PWRSAVE), host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC);
+		curr_pwrsave = false;
 	}
 
-	config = readl_relaxed(host->ioaddr +
-			msm_offset->core_dll_config);
-	config |= CORE_DLL_RST;
-	writel_relaxed(config, host->ioaddr +
-			msm_offset->core_dll_config);
+	if (msm_host->use_updated_dll_reset) {
+		/* Disable the DLL clock */
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG)
+				& ~CORE_CK_OUT_EN), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG);
 
-	config = readl_relaxed(host->ioaddr +
-			msm_offset->core_dll_config);
-	config |= CORE_DLL_PDN;
-	writel_relaxed(config, host->ioaddr +
-			msm_offset->core_dll_config);
-	msm_cm_dll_set_freq(host);
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_2)
+				| CORE_DLL_CLOCK_DISABLE), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_2);
+	}
 
-	if (msm_host->use_14lpp_dll_reset &&
-	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
+	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
+		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+
+	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
+		host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+
+	if (msm_host->use_updated_dll_reset) {
 		u32 mclk_freq = 0;
 
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_dll_config_2);
-		config &= CORE_FLL_CYCLE_CNT;
-		if (config)
-			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
-					clk_get_rate(msm_host->xo_clk));
-		else
-			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
-					clk_get_rate(msm_host->xo_clk));
+		/*
+		 * Only configure the mclk_freq in normal DLL init
+		 * context. If the DLL init is coming from
+		 * CX Collapse Exit context, the host->clock may be zero.
+		 * The DLL_CONFIG_2 register has already been restored to
+		 * proper value prior to getting here.
+		 */
+		if (init_context == DLL_INIT_NORMAL) {
+			switch (host->clock) {
+			case 208000000:
+			case 202000000:
+			case 201500000:
+			case 200000000:
+				mclk_freq = 42;
+				break;
+			case 192000000:
+				mclk_freq = 40;
+				break;
+			default:
+				pr_err("%s: %s: Error. Unsupported clk freq\n",
+					mmc_hostname(mmc), __func__);
+				rc = -EINVAL;
+				goto out;
+			}
 
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_dll_config_2);
-		config &= ~(0xFF << 10);
-		config |= mclk_freq << 10;
+			if ((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_2)
+				& CORE_FLL_CYCLE_CNT))
+				mclk_freq *= 2;
 
-		writel_relaxed(config, host->ioaddr +
-				msm_offset->core_dll_config_2);
+			writel_relaxed(((readl_relaxed(host->ioaddr +
+			   msm_host_offset->CORE_DLL_CONFIG_2)
+			   & ~(0xFF << 10)) | (mclk_freq << 10)),
+			   host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
+		}
 		/* wait for 5us before enabling DLL clock */
 		udelay(5);
 	}
 
-	config = readl_relaxed(host->ioaddr +
-			msm_offset->core_dll_config);
-	config &= ~CORE_DLL_RST;
-	writel_relaxed(config, host->ioaddr +
-			msm_offset->core_dll_config);
+	/* Write 0 to DLL_RST bit of DLL_CONFIG register */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 
-	config = readl_relaxed(host->ioaddr +
-			msm_offset->core_dll_config);
-	config &= ~CORE_DLL_PDN;
-	writel_relaxed(config, host->ioaddr +
-			msm_offset->core_dll_config);
+	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
 
-	if (msm_host->use_14lpp_dll_reset) {
-		msm_cm_dll_set_freq(host);
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_dll_config_2);
-		config &= ~CORE_DLL_CLOCK_DISABLE;
-		writel_relaxed(config, host->ioaddr +
-				msm_offset->core_dll_config_2);
+	if (msm_host->use_updated_dll_reset) {
+		/* Enable the DLL clock */
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_2)
+				& ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG_2);
 	}
 
-	config = readl_relaxed(host->ioaddr +
-			msm_offset->core_dll_config);
-	config |= CORE_DLL_EN;
-	writel_relaxed(config, host->ioaddr +
-			msm_offset->core_dll_config);
+	/* Configure Tassadar DLL (Only applicable for 7FF projects) */
+	if (msm_host->use_7nm_dll) {
+		writel_relaxed(DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
+			ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL, host->ioaddr +
+			msm_host_offset->CORE_DLL_USR_CTL);
 
-	config = readl_relaxed(host->ioaddr +
-			msm_offset->core_dll_config);
-	config |= CORE_CK_OUT_EN;
-	writel_relaxed(config, host->ioaddr +
-			msm_offset->core_dll_config);
+		writel_relaxed(DLL_CONFIG_3_POR_VAL, host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG_3);
+	}
 
+	/* Set DLL_EN bit to 1. */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+
+	/* Set CK_OUT_EN bit to 1. */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG)
+			| CORE_CK_OUT_EN), host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG);
+
+	wait_cnt = 50;
 	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
-	while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
-		 CORE_DLL_LOCK)) {
+	while (!(readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
 		/* max. wait for 50us sec for LOCK bit to be set */
 		if (--wait_cnt == 0) {
-			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
-			       mmc_hostname(mmc));
-			spin_unlock_irqrestore(&host->lock, flags);
-			return -ETIMEDOUT;
+			pr_err("%s: %s: DLL failed to LOCK\n",
+				mmc_hostname(mmc), __func__);
+			rc = -ETIMEDOUT;
+			goto out;
 		}
+		/* wait for 1us before polling again */
 		udelay(1);
 	}
 
+out:
+	/* Restore the correct PWRSAVE state */
+	if (prev_pwrsave ^ curr_pwrsave) {
+		u32 reg = readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC);
+
+		if (prev_pwrsave)
+			reg |= CORE_CLK_PWRSAVE;
+		else
+			reg &= ~CORE_CLK_PWRSAVE;
+
+		writel_relaxed(reg, host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC);
+	}
+
 	spin_unlock_irqrestore(&host->lock, flags);
-	return 0;
-}
-
-static void msm_hc_select_default(struct sdhci_host *host)
-{
-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	u32 config;
-	const struct sdhci_msm_offset *msm_offset =
-					msm_host->offset;
-
-	if (!msm_host->use_cdclp533) {
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_vendor_spec3);
-		config &= ~CORE_PWRSAVE_DLL;
-		writel_relaxed(config, host->ioaddr +
-				msm_offset->core_vendor_spec3);
-	}
-
-	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
-	config &= ~CORE_HC_MCLK_SEL_MASK;
-	config |= CORE_HC_MCLK_SEL_DFLT;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
-
-	/*
-	 * Disable HC_SELECT_IN to be able to use the UHS mode select
-	 * configuration from Host Control2 register for all other
-	 * modes.
-	 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
-	 * in VENDOR_SPEC_FUNC
-	 */
-	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
-	config &= ~CORE_HC_SELECT_IN_EN;
-	config &= ~CORE_HC_SELECT_IN_MASK;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
-
-	/*
-	 * Make sure above writes impacting free running MCLK are completed
-	 * before changing the clk_rate at GCC.
-	 */
-	wmb();
-}
-
-static void msm_hc_select_hs400(struct sdhci_host *host)
-{
-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	struct mmc_ios ios = host->mmc->ios;
-	u32 config, dll_lock;
-	int rc;
-	const struct sdhci_msm_offset *msm_offset =
-					msm_host->offset;
-
-	/* Select the divided clock (free running MCLK/2) */
-	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
-	config &= ~CORE_HC_MCLK_SEL_MASK;
-	config |= CORE_HC_MCLK_SEL_HS400;
-
-	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
-	/*
-	 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
-	 * register
-	 */
-	if ((msm_host->tuning_done || ios.enhanced_strobe) &&
-	    !msm_host->calibration_done) {
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_vendor_spec);
-		config |= CORE_HC_SELECT_IN_HS400;
-		config |= CORE_HC_SELECT_IN_EN;
-		writel_relaxed(config, host->ioaddr +
-				msm_offset->core_vendor_spec);
-	}
-	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
-		/*
-		 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
-		 * core_dll_status to be set. This should get set
-		 * within 15 us at 200 MHz.
-		 */
-		rc = readl_relaxed_poll_timeout(host->ioaddr +
-						msm_offset->core_dll_status,
-						dll_lock,
-						(dll_lock &
-						(CORE_DLL_LOCK |
-						CORE_DDR_DLL_LOCK)), 10,
-						1000);
-		if (rc == -ETIMEDOUT)
-			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
-			       mmc_hostname(host->mmc), dll_lock);
-	}
-	/*
-	 * Make sure above writes impacting free running MCLK are completed
-	 * before changing the clk_rate at GCC.
-	 */
-	wmb();
-}
-
-/*
- * sdhci_msm_hc_select_mode :- In general all timing modes are
- * controlled via UHS mode select in Host Control2 register.
- * eMMC specific HS200/HS400 doesn't have their respective modes
- * defined here, hence we use these values.
- *
- * HS200 - SDR104 (Since they both are equivalent in functionality)
- * HS400 - This involves multiple configurations
- *		Initially SDR104 - when tuning is required as HS200
- *		Then when switching to DDR @ 400MHz (HS400) we use
- *		the vendor specific HC_SELECT_IN to control the mode.
- *
- * In addition to controlling the modes we also need to select the
- * correct input clock for DLL depending on the mode.
- *
- * HS400 - divided clock (free running MCLK/2)
- * All other modes - default (free running MCLK)
- */
-static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
-{
-	struct mmc_ios ios = host->mmc->ios;
-
-	if (ios.timing == MMC_TIMING_MMC_HS400 ||
-	    host->flags & SDHCI_HS400_TUNING)
-		msm_hc_select_hs400(host);
-	else
-		msm_hc_select_default(host);
+	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
+	return rc;
 }
 
 static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
 {
+	u32 calib_done;
+	int ret = 0;
+	int cdc_err = 0;
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	u32 config, calib_done;
-	int ret;
-	const struct sdhci_msm_offset *msm_offset =
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
 					msm_host->offset;
 
-	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DDR_200_CFG)
+			& ~CORE_CDC_T4_DLY_SEL),
+			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
+
+	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
+			& ~CORE_CDC_SWITCH_BYPASS_OFF),
+			host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
+			| CORE_CDC_SWITCH_RC_EN),
+			host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DDR_200_CFG)
+			& ~CORE_START_CDC_TRAFFIC),
+			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
 
 	/*
-	 * Retuning in HS400 (DDR mode) will fail, just reset the
-	 * tuning block and restore the saved tuning phase.
+	 * Perform CDC Register Initialization Sequence
+	 *
+	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
+	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
+	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
+	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
+	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
+	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
+	 * CORE_CSR_CDC_DELAY_CFG	0x3AC
+	 * CORE_CDC_OFFSET_CFG		0x0
+	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
 	 */
-	ret = msm_init_cm_dll(host);
-	if (ret)
-		goto out;
-
-	/* Set the selected phase in delay line hw block */
-	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
-	if (ret)
-		goto out;
-
-	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
-	config |= CORE_CMD_DAT_TRACK_SEL;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
-
-	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
-	config &= ~CORE_CDC_T4_DLY_SEL;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
-
-	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
-	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
-	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
-
-	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
-	config |= CORE_CDC_SWITCH_RC_EN;
-	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);
-
-	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
-	config &= ~CORE_START_CDC_TRAFFIC;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
-
-	/* Perform CDC Register Initialization Sequence */
 
 	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
 	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
@@ -878,214 +929,452 @@
 
 	/* CDC HW Calibration */
 
-	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
-	config |= CORE_SW_TRIG_FULL_CALIB;
-	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+			| CORE_SW_TRIG_FULL_CALIB),
+			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
 
-	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
-	config &= ~CORE_SW_TRIG_FULL_CALIB;
-	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+			& ~CORE_SW_TRIG_FULL_CALIB),
+			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
 
-	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
-	config |= CORE_HW_AUTOCAL_ENA;
-	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+			| CORE_HW_AUTOCAL_ENA),
+			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
 
-	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
-	config |= CORE_TIMER_ENA;
-	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
+			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
 
-	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
-					 calib_done,
-					 (calib_done & CORE_CALIBRATION_DONE),
-					 1, 50);
+	/*
+	 * SDHC has core_mem and hc_mem device memory and these memory
+	 * addresses do not fall within 1KB region. Hence, any update to
+	 * core_mem address space would require an mb() to ensure this gets
+	 * completed before its next update to registers within hc_mem.
+	 */
+	mb();
+
+	/* Poll until the CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 is 1 */
+	ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
+		 calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);
 
 	if (ret == -ETIMEDOUT) {
-		pr_err("%s: %s: CDC calibration was not completed\n",
-		       mmc_hostname(host->mmc), __func__);
+		pr_err("%s: %s: CDC Calibration was not completed\n",
+				mmc_hostname(host->mmc), __func__);
 		goto out;
 	}
 
-	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
+	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
+	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
 			& CORE_CDC_ERROR_CODE_MASK;
-	if (ret) {
-		pr_err("%s: %s: CDC error code %d\n",
-		       mmc_hostname(host->mmc), __func__, ret);
+	if (cdc_err) {
+		pr_err("%s: %s: CDC Error Code %d\n",
+			mmc_hostname(host->mmc), __func__, cdc_err);
 		ret = -EINVAL;
 		goto out;
 	}
 
-	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
-	config |= CORE_START_CDC_TRAFFIC;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
+	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DDR_200_CFG)
+			| CORE_START_CDC_TRAFFIC),
+			host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
 out:
-	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
-		 __func__, ret);
+	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+			__func__, ret);
 	return ret;
 }
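
The calibration sequence above repeatedly uses a read-modify-write idiom: read a register with readl_relaxed(), set or clear one field, and write the result back with writel_relaxed(). A minimal user-space sketch of that idiom, with hypothetical register and bit names standing in for the real MMIO accessors:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only: reg_read()/reg_write() and the bit names
 * below are hypothetical stand-ins for readl_relaxed()/writel_relaxed()
 * and the real CDC register fields.
 */
#define CDC_T4_DLY_SEL   (1u << 0)
#define CDC_SWITCH_RC_EN (1u << 1)

static uint32_t fake_reg;

static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

int main(void)
{
	/* Clear one field, then set another, exactly as the driver does. */
	reg_write(reg_read() & ~CDC_T4_DLY_SEL);
	reg_write(reg_read() | CDC_SWITCH_RC_EN);
	printf("reg = 0x%x\n", fake_reg);
	return 0;
}
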
 
 static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
 {
-	struct mmc_host *mmc = host->mmc;
-	u32 dll_status, config;
-	int ret;
-	const struct sdhci_msm_offset *msm_offset =
-					sdhci_priv_msm_offset(host);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u32 dll_status;
+	int ret = 0;
 
-	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
 
 	/*
-	 * Currently the core_ddr_config register defaults to desired
-	 * configuration on reset. Currently reprogramming the power on
-	 * reset (POR) value in case it might have been modified by
-	 * bootloaders. In the future, if this changes, then the desired
-	 * values will need to be programmed appropriately.
+	 * Reprogramming the value in case it might have been modified by
+	 * bootloaders.
 	 */
-	writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
-			msm_offset->core_ddr_config);
-
-	if (mmc->ios.enhanced_strobe) {
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_ddr_200_cfg);
-		config |= CORE_CMDIN_RCLK_EN;
-		writel_relaxed(config, host->ioaddr +
-				msm_offset->core_ddr_200_cfg);
+	if (msm_host->pdata->rclk_wa) {
+		writel_relaxed(msm_host->pdata->ddr_config, host->ioaddr +
+			msm_host_offset->CORE_DDR_CONFIG);
+	} else if (msm_host->rclk_delay_fix) {
+		writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
+			msm_host_offset->CORE_DDR_CONFIG);
+	} else {
+		writel_relaxed(DDR_CONFIG_POR_VAL, host->ioaddr +
+			msm_host_offset->CORE_DDR_CONFIG_OLD);
 	}
 
-	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
-	config |= CORE_DDR_CAL_EN;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);
+	if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DDR_200_CFG)
+				| CORE_CMDIN_RCLK_EN), host->ioaddr +
+				msm_host_offset->CORE_DDR_200_CFG);
 
-	ret = readl_relaxed_poll_timeout(host->ioaddr +
-					msm_offset->core_dll_status,
-					dll_status,
-					(dll_status & CORE_DDR_DLL_LOCK),
-					10, 1000);
+	/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG_2)
+			| CORE_DDR_CAL_EN),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
+
+	/* Poll until the DDR_DLL_LOCK bit in CORE_DLL_STATUS is set */
+	ret = readl_poll_timeout(host->ioaddr +
+		 msm_host_offset->CORE_DLL_STATUS,
+		 dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);
 
 	if (ret == -ETIMEDOUT) {
-		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
-		       mmc_hostname(host->mmc), __func__);
+		pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
+				mmc_hostname(host->mmc), __func__);
 		goto out;
 	}
 
-	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3);
-	config |= CORE_PWRSAVE_DLL;
-	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec3);
+	/*
+	 * Set the CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
+	 * When MCLK is gated off, it is never gated for less than 0.5us,
+	 * and MCLK must be switched on for at least 1us before DATA
+	 * starts coming. Controllers with the 14lpp tech DLL cannot
+	 * guarantee the above requirement, so PWRSAVE_DLL should not be
+	 * turned on for host controllers using this DLL.
+	 */
+	if (!msm_host->use_14lpp_dll)
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC3)
+				| CORE_PWRSAVE_DLL), host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC3);
+	/*
+	 * SDHC has core_mem and hc_mem device memory and these memory
+	 * addresses do not fall within 1KB region. Hence, any update to
+	 * core_mem address space would require an mb() to ensure this
+	 * gets completed before its next update to registers within
+	 * hc_mem.
+	 */
+	mb();
+out:
+	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+			__func__, ret);
+	return ret;
+}
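
Both calibration paths end by polling a status register until a bit is set or a timeout expires, via readl_poll_timeout(). A minimal sketch of that poll-until-condition-or-timeout pattern, assuming a hypothetical read_status() in place of a real register read:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical status source standing in for a device register read;
 * bit 0 plays the role of CORE_DDR_DLL_LOCK. */
static int polls;
static uint32_t read_status(void)
{
	return (++polls >= 3) ? 0x1 : 0x0;
}

/* Minimal sketch of the poll-until-condition-or-timeout pattern that
 * readl_poll_timeout() provides in the kernel. */
static int poll_timeout(uint32_t mask, unsigned int sleep_us,
			unsigned int timeout_us)
{
	unsigned int waited = 0;

	while (!(read_status() & mask)) {
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		usleep(sleep_us);
		waited += sleep_us;
	}
	return 0;
}

int main(void)
{
	printf("poll result: %d\n", poll_timeout(0x1, 10, 1000));
	return 0;
}
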
+
+static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
+{
+	int ret = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct mmc_host *mmc = host->mmc;
+
+	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+	if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
+		pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
+				mmc_hostname(mmc));
+		return -EINVAL;
+	}
+
+	if (msm_host->calibration_done ||
+		!(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
+		return 0;
+	}
 
 	/*
-	 * Drain writebuffer to ensure above DLL calibration
-	 * and PWRSAVE DLL is enabled.
+	 * Reset the tuning block.
 	 */
-	wmb();
+	ret = msm_init_cm_dll(host, DLL_INIT_NORMAL);
+	if (ret)
+		goto out;
+
+	ret = sdhci_msm_cm_dll_sdc4_calibration(host);
 out:
-	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
-		 __func__, ret);
+	if (!ret)
+		msm_host->calibration_done = true;
+	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+			__func__, ret);
 	return ret;
 }
 
 static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
 {
+	int ret = 0;
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	struct mmc_host *mmc = host->mmc;
-	int ret;
-	u32 config;
-	const struct sdhci_msm_offset *msm_offset =
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
 					msm_host->offset;
 
-	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);
+	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
 
 	/*
 	 * Retuning in HS400 (DDR mode) will fail, just reset the
 	 * tuning block and restore the saved tuning phase.
 	 */
-	ret = msm_init_cm_dll(host);
+	ret = msm_init_cm_dll(host, DLL_INIT_NORMAL);
 	if (ret)
 		goto out;
 
-	if (!mmc->ios.enhanced_strobe) {
-		/* Set the selected phase in delay line hw block */
-		ret = msm_config_cm_dll_phase(host,
-					      msm_host->saved_tuning_phase);
-		if (ret)
-			goto out;
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_dll_config);
-		config |= CORE_CMD_DAT_TRACK_SEL;
-		writel_relaxed(config, host->ioaddr +
-				msm_offset->core_dll_config);
-	}
+	/* Set the selected phase in delay line hw block */
+	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+	if (ret)
+		goto out;
+
+	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG)
+				| CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG);
 
 	if (msm_host->use_cdclp533)
+		/* Calibrate CDCLP533 DLL HW */
 		ret = sdhci_msm_cdclp533_calibration(host);
 	else
+		/* Calibrate CM_DLL_SDC4 HW */
 		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
 out:
-	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
-		 __func__, ret);
+	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+			__func__, ret);
 	return ret;
 }
 
-static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
+static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
+		u8 drv_type)
 {
-	struct sdhci_host *host = mmc_priv(mmc);
+	struct mmc_command cmd = {0};
+	struct mmc_request mrq = {NULL};
+	struct mmc_host *mmc = host->mmc;
+	u8 val = ((drv_type << 4) | 2);
+
+	cmd.opcode = MMC_SWITCH;
+	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+		(EXT_CSD_HS_TIMING << 16) |
+		(val << 8) |
+		EXT_CSD_CMD_SET_NORMAL;
+	cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
+	/* 1 sec */
+	cmd.busy_timeout = 1000 * 1000;
+
+	memset(cmd.resp, 0, sizeof(cmd.resp));
+	cmd.retries = 3;
+
+	mrq.cmd = &cmd;
+	cmd.data = NULL;
+
+	mmc_wait_for_req(mmc, &mrq);
+	pr_debug("%s: %s: set card drive type to %d\n",
+			mmc_hostname(mmc), __func__,
+			drv_type);
+}
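
sdhci_msm_set_mmc_drv_type() packs the requested driver strength into the EXT_CSD HS_TIMING byte: the strength goes in bits [7:4] and the timing selection in bits [3:0], where the constant 2 selects HS200. A small sketch of that encoding:

#include <stdio.h>

int main(void)
{
	/*
	 * The EXT_CSD HS_TIMING byte built above packs the driver
	 * strength in bits [7:4] and the timing selection in bits [3:0];
	 * the constant 2 selects HS200.
	 */
	unsigned char drv_type = 1;
	unsigned char val = (unsigned char)((drv_type << 4) | 2);

	printf("HS_TIMING byte = 0x%02x\n", val); /* prints 0x12 */
	return 0;
}
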
+
+int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+	unsigned long flags;
 	int tuning_seq_cnt = 3;
-	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
+	u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
+	const u32 *tuning_block_pattern = tuning_block_64;
+	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
 	int rc;
-	struct mmc_ios ios = host->mmc->ios;
+	struct mmc_host *mmc = host->mmc;
+	struct mmc_ios	ios = host->mmc->ios;
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	u8 drv_type = 0;
+	bool drv_type_changed = false;
+	struct mmc_card *card = host->mmc->card;
+	int sts_retry;
+	u8 last_good_phase = 0;
 
 	/*
 	 * Tuning is required for the SDR104, HS200 and HS400 modes,
 	 * and only if the clock frequency is greater than 100MHz.
 	 */
 	if (host->clock <= CORE_FREQ_100MHZ ||
-	    !(ios.timing == MMC_TIMING_MMC_HS400 ||
-	    ios.timing == MMC_TIMING_MMC_HS200 ||
-	    ios.timing == MMC_TIMING_UHS_SDR104))
+		!((ios.timing == MMC_TIMING_MMC_HS400) ||
+		(ios.timing == MMC_TIMING_MMC_HS200) ||
+		(ios.timing == MMC_TIMING_UHS_SDR104)))
 		return 0;
 
 	/*
-	 * For HS400 tuning in HS200 timing requires:
-	 * - select MCLK/2 in VENDOR_SPEC
-	 * - program MCLK to 400MHz (or nearest supported) in GCC
+	 * Don't allow re-tuning for CRC errors observed for any commands
+	 * that are sent during the tuning sequence itself.
 	 */
-	if (host->flags & SDHCI_HS400_TUNING) {
-		sdhci_msm_hc_select_mode(host);
-		msm_set_clock_rate_for_bus_mode(host, ios.clock);
-		host->flags &= ~SDHCI_HS400_TUNING;
+	if (msm_host->tuning_in_progress)
+		return 0;
+	msm_host->tuning_in_progress = true;
+	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
+
+	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode */
+	if (msm_host->tuning_done && !msm_host->calibration_done &&
+		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
+		rc = sdhci_msm_hs400_dll_calibration(host);
+		spin_lock_irqsave(&host->lock, flags);
+		if (!rc)
+			msm_host->calibration_done = true;
+		spin_unlock_irqrestore(&host->lock, flags);
+		goto out;
+	}
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
+		(mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
+		tuning_block_pattern = tuning_block_128;
+		size = sizeof(tuning_block_128);
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	data_buf = kmalloc(size, GFP_KERNEL);
+	if (!data_buf) {
+		rc = -ENOMEM;
+		goto out;
 	}
 
 retry:
-	/* First of all reset the tuning block */
-	rc = msm_init_cm_dll(host);
+	tuned_phase_cnt = 0;
+
+	/* First of all, reset the tuning block */
+	rc = msm_init_cm_dll(host, DLL_INIT_NORMAL);
 	if (rc)
-		return rc;
+		goto kfree;
 
 	phase = 0;
 	do {
-		/* Set the phase in delay line hw block */
+		struct mmc_command cmd = {0};
+		struct mmc_data data = {0};
+		struct mmc_request mrq = {
+			.cmd = &cmd,
+			.data = &data
+		};
+		struct scatterlist sg;
+		struct mmc_command sts_cmd = {0};
+
+		/* set the phase in delay line hw block */
 		rc = msm_config_cm_dll_phase(host, phase);
 		if (rc)
-			return rc;
+			goto kfree;
 
-		msm_host->saved_tuning_phase = phase;
-		rc = mmc_send_tuning(mmc, opcode, NULL);
-		if (!rc) {
-			/* Tuning is successful at this tuning point */
-			tuned_phases[tuned_phase_cnt++] = phase;
-			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
-				 mmc_hostname(mmc), phase);
+		cmd.opcode = opcode;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+		data.blksz = size;
+		data.blocks = 1;
+		data.flags = MMC_DATA_READ;
+		data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */
+
+		data.sg = &sg;
+		data.sg_len = 1;
+		sg_init_one(&sg, data_buf, size);
+		memset(data_buf, 0, size);
+		mmc_wait_for_req(mmc, &mrq);
+
+		if (card && (cmd.error || data.error)) {
+			/*
+			 * Set the dll to last known good phase while sending
+			 * status command to ensure that status command won't
+			 * fail due to bad phase.
+			 */
+			if (tuned_phase_cnt)
+				last_good_phase =
+					tuned_phases[tuned_phase_cnt-1];
+			else if (msm_host->saved_tuning_phase !=
+					INVALID_TUNING_PHASE)
+				last_good_phase = msm_host->saved_tuning_phase;
+
+			rc = msm_config_cm_dll_phase(host, last_good_phase);
+			if (rc)
+				goto kfree;
+
+			sts_cmd.opcode = MMC_SEND_STATUS;
+			sts_cmd.arg = card->rca << 16;
+			sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+			sts_retry = 5;
+			while (sts_retry) {
+				mmc_wait_for_cmd(mmc, &sts_cmd, 0);
+
+				if (sts_cmd.error ||
+				   (R1_CURRENT_STATE(sts_cmd.resp[0])
+				   != R1_STATE_TRAN)) {
+					sts_retry--;
+					/*
+					 * Wait for at least 146 MCLK cycles
+					 * for the card to move to the TRANS
+					 * state. As MCLK would be at least
+					 * 200MHz for tuning, at most a 0.73us
+					 * delay is needed. To be on the safer
+					 * side, a 1ms delay is given.
+					 */
+					usleep_range(1000, 1200);
+					pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
+						mmc_hostname(mmc), phase,
+						sts_cmd.error, sts_cmd.resp[0]);
+					continue;
+				}
+				break;
+			}
 		}
-	} while (++phase < ARRAY_SIZE(tuned_phases));
+
+		if (!cmd.error && !data.error &&
+			!memcmp(data_buf, tuning_block_pattern, size)) {
+			/* tuning is successful at this tuning point */
+			tuned_phases[tuned_phase_cnt++] = phase;
+			pr_debug("%s: %s: found *** good *** phase = %d\n",
+				mmc_hostname(mmc), __func__, phase);
+		} else {
+			/* Ignore CRC errors that occurred during tuning */
+			if (cmd.error)
+				mmc->err_stats[MMC_ERR_CMD_CRC]--;
+			else if (data.error)
+				mmc->err_stats[MMC_ERR_DAT_CRC]--;
+			pr_debug("%s: %s: found ## bad ## phase = %d\n",
+				mmc_hostname(mmc), __func__, phase);
+		}
+	} while (++phase < NUM_TUNING_PHASES);
+
+	if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
+			card && mmc_card_mmc(card)) {
+		/*
+		 * If all phases pass then it's a problem. So change the card's
+		 * drive type to a different value, if supported and repeat
+		 * tuning until at least one phase fails. Then set the original
+		 * drive type back.
+		 *
+		 * If all the phases still pass after trying all possible
+		 * drive types, then one of those 16 phases will be picked.
+		 * This is no different from what was going on before the
+		 * modification to change drive type and retune.
+		 */
+		pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
+				tuned_phase_cnt);
+
+		/* Set drive type to another value; the default setting is 0x0 */
+		while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
+			pr_debug("%s: trying different drive strength (%d)\n",
+				mmc_hostname(mmc), drv_type);
+			if (card->ext_csd.raw_driver_strength &
+					(1 << drv_type)) {
+				sdhci_msm_set_mmc_drv_type(host, opcode,
+						drv_type);
+				if (!drv_type_changed)
+					drv_type_changed = true;
+				goto retry;
+			}
+		}
+	}
+
+	/* reset drive type to default (50 ohm) if changed */
+	if (drv_type_changed)
+		sdhci_msm_set_mmc_drv_type(host, opcode, 0);
 
 	if (tuned_phase_cnt) {
 		rc = msm_find_most_appropriate_phase(host, tuned_phases,
-						     tuned_phase_cnt);
+							tuned_phase_cnt);
 		if (rc < 0)
-			return rc;
+			goto kfree;
 		else
-			phase = rc;
+			phase = (u8)rc;
 
 		/*
 		 * Finally set the selected phase in delay
@@ -1093,239 +1382,1278 @@
 		 */
 		rc = msm_config_cm_dll_phase(host, phase);
 		if (rc)
-			return rc;
-		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
-			 mmc_hostname(mmc), phase);
+			goto kfree;
+		msm_host->saved_tuning_phase = phase;
+		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
+				mmc_hostname(mmc), __func__, phase);
 	} else {
 		if (--tuning_seq_cnt)
 			goto retry;
-		/* Tuning failed */
-		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
-		       mmc_hostname(mmc));
+		/* tuning failed */
+		pr_err("%s: %s: no tuning point found\n",
+			mmc_hostname(mmc), __func__);
 		rc = -EIO;
 	}
 
+kfree:
+	kfree(data_buf);
+out:
+	spin_lock_irqsave(&host->lock, flags);
 	if (!rc)
 		msm_host->tuning_done = true;
+	spin_unlock_irqrestore(&host->lock, flags);
+	msm_host->tuning_in_progress = false;
+	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
+	return rc;
+}
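
msm_find_most_appropriate_phase() is implemented elsewhere in this file; the standalone sketch below only illustrates the general idea: pick the middle of the longest contiguous window of passing phases, with the window allowed to wrap from phase 15 back to phase 0.

#include <stdio.h>

#define NUM_PHASES 16

/*
 * Illustrative sketch only: this approximates the policy behind
 * msm_find_most_appropriate_phase(); the real helper lives elsewhere
 * in sdhci-msm.c.
 */
static int pick_phase(const unsigned char *good, int n)
{
	int best_len = 0, best_start = 0, i, j;

	for (i = 0; i < n; i++) {
		int len = 0;

		/* Length of the run of good phases starting at i, wrapping. */
		for (j = 0; j < n && good[(i + j) % n]; j++)
			len++;
		if (len > best_len) {
			best_len = len;
			best_start = i;
		}
	}
	return best_len ? (best_start + best_len / 2) % n : -1;
}

int main(void)
{
	/* Phases 5..9 passed tuning in this example; 7 is selected. */
	unsigned char good[NUM_PHASES] = { [5] = 1, [6] = 1, [7] = 1,
					   [8] = 1, [9] = 1 };

	printf("selected phase = %d\n", pick_phase(good, NUM_PHASES));
	return 0;
}
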
+
+static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
+{
+	struct sdhci_msm_gpio_data *curr;
+	int i, ret = 0;
+
+	curr = pdata->pin_data->gpio_data;
+	for (i = 0; i < curr->size; i++) {
+		if (!gpio_is_valid(curr->gpio[i].no)) {
+			ret = -EINVAL;
+			pr_err("%s: Invalid gpio = %d\n", __func__,
+					curr->gpio[i].no);
+			goto free_gpios;
+		}
+		if (enable) {
+			ret = gpio_request(curr->gpio[i].no,
+						curr->gpio[i].name);
+			if (ret) {
+				pr_err("%s: gpio_request(%d, %s) failed %d\n",
+					__func__, curr->gpio[i].no,
+					curr->gpio[i].name, ret);
+				goto free_gpios;
+			}
+			curr->gpio[i].is_enabled = true;
+		} else {
+			gpio_free(curr->gpio[i].no);
+			curr->gpio[i].is_enabled = false;
+		}
+	}
+	return ret;
+
+free_gpios:
+	for (i--; i >= 0; i--) {
+		gpio_free(curr->gpio[i].no);
+		curr->gpio[i].is_enabled = false;
+	}
+	return ret;
+}
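
The free_gpios label above implements a common partial-failure unwind: on a failure at index i, release everything acquired so far, in reverse order. A minimal sketch, with acquire() and release() as hypothetical stand-ins for gpio_request()/gpio_free():

#include <stdio.h>

/* Hypothetical stand-ins for gpio_request()/gpio_free(). */
static int acquire(int id) { return id == 3 ? -1 : 0; /* fail at #3 */ }
static void release(int id) { printf("released %d\n", id); }

int main(void)
{
	int i, n = 5;

	for (i = 0; i < n; i++)
		if (acquire(i))
			goto unwind;
	return 0;

unwind:
	/* Release only the entries acquired before the failure. */
	for (i--; i >= 0; i--)
		release(i);
	return 1;
}
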
+
+static int sdhci_msm_config_pinctrl_drv_type(struct sdhci_msm_pltfm_data *pdata,
+		unsigned int clock)
+{
+	int ret = 0;
+
+	if (clock > 150000000) {
+		if (pdata->pctrl_data->pins_drv_type_200MHz)
+			ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+				pdata->pctrl_data->pins_drv_type_200MHz);
+	} else if (clock > 75000000) {
+		if (pdata->pctrl_data->pins_drv_type_100MHz)
+			ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+				pdata->pctrl_data->pins_drv_type_100MHz);
+	} else if (clock > 400000) {
+		if (pdata->pctrl_data->pins_drv_type_50MHz)
+			ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+				pdata->pctrl_data->pins_drv_type_50MHz);
+	} else {
+		if (pdata->pctrl_data->pins_drv_type_400KHz)
+			ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+				pdata->pctrl_data->pins_drv_type_400KHz);
+	}
+
+	return ret;
+}
+
+static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
+		bool enable)
+{
+	int ret = 0;
+
+	if (enable)
+		ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+			pdata->pctrl_data->pins_active);
+	else
+		ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+			pdata->pctrl_data->pins_sleep);
+
+	if (ret < 0)
+		pr_err("%s state for pinctrl failed with %d\n",
+			enable ? "Enabling" : "Disabling", ret);
+
+	return ret;
+}
+
+static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
+{
+	int ret = 0;
+
+	if (pdata->pin_cfg_sts == enable) {
+		return 0;
+	} else if (pdata->pctrl_data) {
+		ret = sdhci_msm_setup_pinctrl(pdata, enable);
+		goto out;
+	} else if (!pdata->pin_data) {
+		return 0;
+	}
+
+	if (pdata->pin_data->is_gpio)
+		ret = sdhci_msm_setup_gpio(pdata, enable);
+out:
+	if (!ret)
+		pdata->pin_cfg_sts = enable;
+
+	return ret;
+}
+
+static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
+				 u32 **out, int *len, u32 size)
+{
+	int ret = 0;
+	struct device_node *np = dev->of_node;
+	size_t sz;
+	u32 *arr = NULL;
+
+	if (!of_get_property(np, prop_name, len)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	sz = *len = *len / sizeof(*arr);
+	if (sz <= 0 || (size > 0 && (sz > size))) {
+		dev_err(dev, "%s invalid size\n", prop_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
+	if (!arr) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = of_property_read_u32_array(np, prop_name, arr, sz);
+	if (ret < 0) {
+		dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
+		goto out;
+	}
+	*out = arr;
+out:
+	if (ret)
+		*len = 0;
+	return ret;
+}
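
sdhci_msm_dt_get_array() receives the property length in bytes from of_get_property() and divides by sizeof(u32) to obtain the element count before reading the array. A trivial sketch of that conversion:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/*
	 * A hypothetical 16-byte "qcom,clk-rates" property holds four
	 * u32 clock rates.
	 */
	int len_bytes = 16;
	int nelems = len_bytes / (int)sizeof(uint32_t);

	printf("%d elements\n", nelems); /* prints 4 */
	return 0;
}
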
+
+#define MAX_PROP_SIZE 32
+static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
+		struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
+{
+	int len, ret = 0;
+	const __be32 *prop;
+	char prop_name[MAX_PROP_SIZE];
+	struct sdhci_msm_reg_data *vreg;
+	struct device_node *np = dev->of_node;
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
+	if (!of_parse_phandle(np, prop_name, 0)) {
+		dev_info(dev, "No vreg data found for %s\n", vreg_name);
+		return ret;
+	}
+
+	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg) {
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	vreg->name = vreg_name;
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+			"qcom,%s-always-on", vreg_name);
+	if (of_get_property(np, prop_name, NULL))
+		vreg->is_always_on = true;
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+			"qcom,%s-lpm-sup", vreg_name);
+	if (of_get_property(np, prop_name, NULL))
+		vreg->lpm_sup = true;
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+			"qcom,%s-voltage-level", vreg_name);
+	prop = of_get_property(np, prop_name, &len);
+	if (!prop || (len != (2 * sizeof(__be32)))) {
+		dev_warn(dev, "%s %s property\n",
+			prop ? "invalid format" : "no", prop_name);
+	} else {
+		vreg->low_vol_level = be32_to_cpup(&prop[0]);
+		vreg->high_vol_level = be32_to_cpup(&prop[1]);
+	}
+
+	snprintf(prop_name, MAX_PROP_SIZE,
+			"qcom,%s-current-level", vreg_name);
+	prop = of_get_property(np, prop_name, &len);
+	if (!prop || (len != (2 * sizeof(__be32)))) {
+		dev_warn(dev, "%s %s property\n",
+			prop ? "invalid format" : "no", prop_name);
+	} else {
+		vreg->lpm_uA = be32_to_cpup(&prop[0]);
+		vreg->hpm_uA = be32_to_cpup(&prop[1]);
+	}
+
+	*vreg_data = vreg;
+	dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
+		vreg->name, vreg->is_always_on ? "always_on," : "",
+		vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
+		vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
+
+	return ret;
+}
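
The vreg parser above derives several DT property names from a single supply name with snprintf(). A small sketch of the derivation, using the hypothetical supply name "vdd-io":

#include <stdio.h>

#define MAX_PROP_SIZE 32

int main(void)
{
	/* "vdd-io" expands to "qcom,vdd-io-voltage-level" and so on. */
	char prop_name[MAX_PROP_SIZE];

	snprintf(prop_name, MAX_PROP_SIZE,
		 "qcom,%s-voltage-level", "vdd-io");
	printf("%s\n", prop_name);
	return 0;
}
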
+
+static int sdhci_msm_parse_pinctrl_info(struct device *dev,
+		struct sdhci_msm_pltfm_data *pdata)
+{
+	struct sdhci_pinctrl_data *pctrl_data;
+	struct pinctrl *pctrl;
+	int ret = 0;
+
+	/* Try to obtain pinctrl handle */
+	pctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(pctrl)) {
+		ret = PTR_ERR(pctrl);
+		goto out;
+	}
+	pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
+	if (!pctrl_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	pctrl_data->pctrl = pctrl;
+	/* Look-up and keep the states handy to be used later */
+	pctrl_data->pins_active = pinctrl_lookup_state(
+			pctrl_data->pctrl, "active");
+	if (IS_ERR(pctrl_data->pins_active)) {
+		ret = PTR_ERR(pctrl_data->pins_active);
+		dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
+		goto out;
+	}
+	pctrl_data->pins_sleep = pinctrl_lookup_state(
+			pctrl_data->pctrl, "sleep");
+	if (IS_ERR(pctrl_data->pins_sleep)) {
+		ret = PTR_ERR(pctrl_data->pins_sleep);
+		dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
+		goto out;
+	}
+
+	pctrl_data->pins_drv_type_400KHz = pinctrl_lookup_state(
+			pctrl_data->pctrl, "ds_400KHz");
+	if (IS_ERR(pctrl_data->pins_drv_type_400KHz)) {
+		dev_dbg(dev, "Could not get 400K pinstates, err:%ld\n",
+			PTR_ERR(pctrl_data->pins_drv_type_400KHz));
+		pctrl_data->pins_drv_type_400KHz = NULL;
+	}
+
+	pctrl_data->pins_drv_type_50MHz = pinctrl_lookup_state(
+			pctrl_data->pctrl, "ds_50MHz");
+	if (IS_ERR(pctrl_data->pins_drv_type_50MHz)) {
+		dev_dbg(dev, "Could not get 50M pinstates, err:%ld\n",
+			PTR_ERR(pctrl_data->pins_drv_type_50MHz));
+		pctrl_data->pins_drv_type_50MHz = NULL;
+	}
+
+	pctrl_data->pins_drv_type_100MHz = pinctrl_lookup_state(
+			pctrl_data->pctrl, "ds_100MHz");
+	if (IS_ERR(pctrl_data->pins_drv_type_100MHz)) {
+		dev_dbg(dev, "Could not get 100M pinstates, err:%ld\n",
+			PTR_ERR(pctrl_data->pins_drv_type_100MHz));
+		pctrl_data->pins_drv_type_100MHz = NULL;
+	}
+
+	pctrl_data->pins_drv_type_200MHz = pinctrl_lookup_state(
+			pctrl_data->pctrl, "ds_200MHz");
+	if (IS_ERR(pctrl_data->pins_drv_type_200MHz)) {
+		dev_dbg(dev, "Could not get 200M pinstates, err:%ld\n",
+			PTR_ERR(pctrl_data->pins_drv_type_200MHz));
+		pctrl_data->pins_drv_type_200MHz = NULL;
+	}
+
+	pdata->pctrl_data = pctrl_data;
+out:
+	return ret;
+}
+
+#define GPIO_NAME_MAX_LEN 32
+static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
+		struct sdhci_msm_pltfm_data *pdata)
+{
+	int ret = 0, cnt, i;
+	struct sdhci_msm_pin_data *pin_data;
+	struct device_node *np = dev->of_node;
+
+	ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
+	if (!ret) {
+		goto out;
+	} else if (ret == -EPROBE_DEFER) {
+		dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
+		goto out;
+	} else {
+		dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
+			ret);
+		ret = 0;
+	}
+	pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
+	if (!pin_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	cnt = of_gpio_count(np);
+	if (cnt > 0) {
+		pin_data->gpio_data = devm_kzalloc(dev,
+				sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
+		if (!pin_data->gpio_data) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		pin_data->gpio_data->size = cnt;
+		pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
+				sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
+
+		if (!pin_data->gpio_data->gpio) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		for (i = 0; i < cnt; i++) {
+			const char *name = NULL;
+			char result[GPIO_NAME_MAX_LEN];
+
+			pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
+			of_property_read_string_index(np,
+					"qcom,gpio-names", i, &name);
+
+			snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
+					dev_name(dev), name ? name : "?");
+			pin_data->gpio_data->gpio[i].name = result;
+			dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
+					pin_data->gpio_data->gpio[i].name,
+					pin_data->gpio_data->gpio[i].no);
+		}
+	}
+	pdata->pin_data = pin_data;
+out:
+	if (ret)
+		dev_err(dev, "%s failed with err %d\n", __func__, ret);
+	return ret;
+}
+
+#ifdef CONFIG_SMP
+static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
+{
+	pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
+}
+#else
+static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
+#endif
+
+static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
+		struct sdhci_msm_pltfm_data *pdata)
+{
+	struct device_node *np = dev->of_node;
+	const char *str;
+	u32 cpu;
+	int ret = 0;
+	int i;
+
+	pdata->pm_qos_data.irq_valid = false;
+	pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
+	if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
+		!strcmp(str, "affine_irq")) {
+		parse_affine_irq(pdata);
+	}
+
+	/* must specify cpu for "affine_cores" type */
+	if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
+		pdata->pm_qos_data.irq_cpu = -1;
+		ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
+		if (ret) {
+			dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
+				ret);
+			goto out;
+		}
+		if (cpu >= num_possible_cpus()) {
+			dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
+				__func__, cpu, num_possible_cpus());
+			ret = -EINVAL;
+			goto out;
+		}
+		pdata->pm_qos_data.irq_cpu = cpu;
+	}
+
+	if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
+		SDHCI_POWER_POLICY_NUM) {
+		dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
+			__func__, SDHCI_POWER_POLICY_NUM);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
+		of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
+			&pdata->pm_qos_data.irq_latency.latency[i]);
+
+	pdata->pm_qos_data.irq_valid = true;
+out:
+	return ret;
+}
+
+static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
+		struct sdhci_msm_pltfm_data *pdata)
+{
+	struct device_node *np = dev->of_node;
+	u32 mask;
+	int nr_groups;
+	int ret;
+	int i;
+
+	/* Read cpu group mapping */
+	nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
+	if (nr_groups <= 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+	pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
+	pdata->pm_qos_data.cpu_group_map.mask =
+		kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
+	if (!pdata->pm_qos_data.cpu_group_map.mask) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < nr_groups; i++) {
+		of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
+			i, &mask);
+
+		pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
+		if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
+			cpu_possible_mask)) {
+			dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
+				__func__, mask, i);
+			ret = -EINVAL;
+			goto free_res;
+		}
+	}
+	return 0;
+
+free_res:
+	kfree(pdata->pm_qos_data.cpu_group_map.mask);
+out:
+	return ret;
+}
+
+static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
+		int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
+{
+	struct device_node *np = dev->of_node;
+	struct sdhci_msm_pm_qos_latency *values;
+	int ret;
+	int i;
+	int group;
+	int cfg;
+
+	ret = of_property_count_u32_elems(np, name);
+	if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
+		dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
+			__func__, name,	SDHCI_POWER_POLICY_NUM * nr_groups,
+			ret);
+		return -EINVAL;
+	} else if (ret < 0) {
+		return ret;
+	}
+
+	values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
+			GFP_KERNEL);
+	if (!values)
+		return -ENOMEM;
+
+	for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
+		group = i / SDHCI_POWER_POLICY_NUM;
+		cfg = i % SDHCI_POWER_POLICY_NUM;
+		of_property_read_u32_index(np, name, i,
+				&(values[group].latency[cfg]));
+	}
+
+	*latency = values;
+	return 0;
+}
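
The latency parser above unpacks a flattened DT array into per-group, per-policy entries with group = i / SDHCI_POWER_POLICY_NUM and cfg = i % SDHCI_POWER_POLICY_NUM. A minimal sketch of the same arithmetic, assuming two power policies per group (the real count is SDHCI_POWER_POLICY_NUM, defined elsewhere in the driver):

#include <stdio.h>

/* Assumption: two power policies per cpu group. */
#define POLICY_NUM 2
#define NR_GROUPS  2

int main(void)
{
	/* A hypothetical flattened DT latency array, POLICY_NUM entries
	 * per group, unpacked with the same index arithmetic as above. */
	unsigned int flat[] = { 100, 200, 300, 400 };
	unsigned int latency[NR_GROUPS][POLICY_NUM];
	int i;

	for (i = 0; i < NR_GROUPS * POLICY_NUM; i++)
		latency[i / POLICY_NUM][i % POLICY_NUM] = flat[i];

	printf("group 1, policy 0: %u\n", latency[1][0]); /* prints 300 */
	return 0;
}
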
+
+static void sdhci_msm_pm_qos_parse(struct device *dev,
+				struct sdhci_msm_pltfm_data *pdata)
+{
+	if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
+		dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
+			__func__);
+
+	if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
+		pdata->pm_qos_data.cmdq_valid =
+			!sdhci_msm_pm_qos_parse_latency(dev,
+				"qcom,pm-qos-cmdq-latency-us",
+				pdata->pm_qos_data.cpu_group_map.nr_groups,
+				&pdata->pm_qos_data.cmdq_latency);
+		pdata->pm_qos_data.legacy_valid =
+			!sdhci_msm_pm_qos_parse_latency(dev,
+				"qcom,pm-qos-legacy-latency-us",
+				pdata->pm_qos_data.cpu_group_map.nr_groups,
+				&pdata->pm_qos_data.latency);
+		if (!pdata->pm_qos_data.cmdq_valid &&
+			!pdata->pm_qos_data.legacy_valid) {
+			/* clean-up previously allocated arrays */
+			kfree(pdata->pm_qos_data.latency);
+			kfree(pdata->pm_qos_data.cmdq_latency);
+			dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
+				__func__);
+		}
+	} else {
+		dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
+			__func__);
+	}
+}
+
+/* Parse platform data */
+static
+struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
+						struct sdhci_msm_host *msm_host)
+{
+	struct sdhci_msm_pltfm_data *pdata = NULL;
+	struct device_node *np = dev->of_node;
+	u32 bus_width = 0;
+	int len, i;
+	int clk_table_len;
+	u32 *clk_table = NULL;
+	int ice_clk_table_len;
+	u32 *ice_clk_table = NULL;
+	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
+	const char *lower_bus_speed = NULL;
+	int bus_clk_table_len;
+	u32 *bus_clk_table = NULL;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		goto out;
+
+	pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
+	if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
+		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+
+	of_property_read_u32(np, "qcom,bus-width", &bus_width);
+	if (bus_width == 8)
+		pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
+	else if (bus_width == 4)
+		pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
+	else {
+		dev_notice(dev, "invalid bus-width, defaulting to 1-bit mode\n");
+		pdata->mmc_bus_width = 0;
+	}
+
+	if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
+			&msm_host->mmc->clk_scaling.pltfm_freq_table,
+			&msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
+		pr_debug("%s: no clock scaling frequencies were supplied\n",
+			dev_name(dev));
+	else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
+			!msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
+		dev_err(dev, "bad dts clock scaling frequencies\n");
+
+	/*
+	 * A few hosts can support DDR52 mode at the same lower
+	 * system voltage corner as high-speed mode. In such cases,
+	 * it is always better to put the bus in DDR mode, which will
+	 * improve performance without any power impact.
+	 */
+	if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
+				&lower_bus_speed)) {
+		if (!strcmp(lower_bus_speed, "DDR52"))
+			msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
+				MMC_SCALING_LOWER_DDR52_MODE;
+	}
+
+	if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
+			&clk_table, &clk_table_len, 0)) {
+		dev_err(dev, "failed parsing supported clock rates\n");
+		goto out;
+	}
+	if (!clk_table || !clk_table_len) {
+		dev_err(dev, "Invalid clock table\n");
+		goto out;
+	}
+	pdata->sup_clk_table = clk_table;
+	pdata->sup_clk_cnt = clk_table_len;
+
+	if (!sdhci_msm_dt_get_array(dev, "qcom,bus-aggr-clk-rates",
+			&bus_clk_table, &bus_clk_table_len, 0)) {
+		if (bus_clk_table && bus_clk_table_len) {
+			pdata->bus_clk_table = bus_clk_table;
+			pdata->bus_clk_cnt = bus_clk_table_len;
+		}
+	}
+
+	if (msm_host->ice.pdev) {
+		if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
+				&ice_clk_table, &ice_clk_table_len, 0)) {
+			dev_err(dev, "failed parsing supported ice clock rates\n");
+			goto out;
+		}
+		if (!ice_clk_table || !ice_clk_table_len) {
+			dev_err(dev, "Invalid clock table\n");
+			goto out;
+		}
+		if (ice_clk_table_len != 2) {
+			dev_err(dev, "Need max and min frequencies in the table\n");
+			goto out;
+		}
+		pdata->sup_ice_clk_table = ice_clk_table;
+		pdata->sup_ice_clk_cnt = ice_clk_table_len;
+		pdata->ice_clk_max = pdata->sup_ice_clk_table[0];
+		pdata->ice_clk_min = pdata->sup_ice_clk_table[1];
+		dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n",
+				pdata->ice_clk_max, pdata->ice_clk_min);
+	}
+
+	pdata->vreg_data = devm_kzalloc(dev,
+			sizeof(struct sdhci_msm_slot_reg_data), GFP_KERNEL);
+	if (!pdata->vreg_data) {
+		dev_err(dev, "failed to allocate memory for vreg data\n");
+		goto out;
+	}
+
+	if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
+					 "vdd")) {
+		dev_err(dev, "failed parsing vdd data\n");
+		goto out;
+	}
+	if (sdhci_msm_dt_parse_vreg_info(dev,
+					 &pdata->vreg_data->vdd_io_data,
+					 "vdd-io")) {
+		dev_err(dev, "failed parsing vdd-io data\n");
+		goto out;
+	}
+
+	if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
+		dev_err(dev, "failed parsing gpio data\n");
+		goto out;
+	}
+
+	len = of_property_count_strings(np, "qcom,bus-speed-mode");
+
+	for (i = 0; i < len; i++) {
+		const char *name = NULL;
+
+		of_property_read_string_index(np,
+			"qcom,bus-speed-mode", i, &name);
+		if (!name)
+			continue;
+
+		if (!strcmp(name, "HS400_1p8v"))
+			pdata->caps2 |= MMC_CAP2_HS400_1_8V;
+		else if (!strcmp(name, "HS400_1p2v"))
+			pdata->caps2 |= MMC_CAP2_HS400_1_2V;
+		else if (!strcmp(name, "HS200_1p8v"))
+			pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+		else if (!strcmp(name, "HS200_1p2v"))
+			pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+		else if (!strcmp(name, "DDR_1p8v"))
+			pdata->caps |= MMC_CAP_1_8V_DDR
+						| MMC_CAP_UHS_DDR50;
+		else if (!strcmp(name, "DDR_1p2v"))
+			pdata->caps |= MMC_CAP_1_2V_DDR
+						| MMC_CAP_UHS_DDR50;
+	}
+
+	if (of_get_property(np, "qcom,nonremovable", NULL))
+		pdata->nonremovable = true;
+
+	if (of_get_property(np, "qcom,nonhotplug", NULL))
+		pdata->nonhotplug = true;
+
+	pdata->largeaddressbus =
+		of_property_read_bool(np, "qcom,large-address-bus");
+
+	sdhci_msm_pm_qos_parse(dev, pdata);
+
+	if (of_get_property(np, "qcom,core_3_0v_support", NULL))
+		msm_host->core_3_0v_support = true;
+
+	msm_host->regs_restore.is_supported =
+		of_property_read_bool(np, "qcom,restore-after-cx-collapse");
+
+	if (!of_property_read_u32(np, "qcom,ddr-config", &pdata->ddr_config))
+		pdata->rclk_wa = true;
+
+	return pdata;
+out:
+	return NULL;
+}
+
+/* Returns required bandwidth in Bytes per Sec */
+static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
+					struct mmc_ios *ios)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	unsigned int bw;
+
+	bw = msm_host->clk_rate;
+	/*
+	 * For DDR mode, the SDCC controller clock runs at double
+	 * the rate of the actual clock that goes to the card.
+	 */
+	if (ios->bus_width == MMC_BUS_WIDTH_4)
+		bw /= 2;
+	else if (ios->bus_width == MMC_BUS_WIDTH_1)
+		bw /= 8;
+
+	return bw;
+}
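
The estimate above treats an 8-bit bus as moving one byte per controller clock cycle, so narrower buses scale the requirement down. A worked sketch: a 200MHz controller clock on a 4-bit bus yields a 100MB/s requirement.

#include <stdio.h>

/* Sketch of sdhci_get_bw_required() above, in bytes per second. */
static unsigned int bw_required(unsigned int clk_rate, int bus_width_bits)
{
	unsigned int bw = clk_rate;

	if (bus_width_bits == 4)
		bw /= 2;
	else if (bus_width_bits == 1)
		bw /= 8;
	return bw;
}

int main(void)
{
	/* 200MHz controller clock, 4-bit bus -> 100000000 bytes/sec. */
	printf("%u bytes/sec\n", bw_required(200000000, 4));
	return 0;
}
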
+
+static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
+					   unsigned int bw)
+{
+	unsigned int *table = host->pdata->voting_data->bw_vecs;
+	unsigned int size = host->pdata->voting_data->bw_vecs_size;
+	int i;
+
+	if (host->msm_bus_vote.is_max_bw_needed && bw)
+		return host->msm_bus_vote.max_bw_vote;
+
+	for (i = 0; i < size; i++) {
+		if (bw <= table[i])
+			break;
+	}
+
+	if (i && (i == size))
+		i--;
+
+	return i;
+}
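
The vote lookup above returns the index of the first bandwidth-vector entry that satisfies the request, clamping to the last entry when the request exceeds every threshold. A sketch with a hypothetical table of bytes-per-second thresholds of the kind carried by "qcom,bus-bw-vectors-bps":

#include <stdio.h>

/* Sketch of sdhci_msm_bus_get_vote_for_bw() above. */
static int vote_for_bw(const unsigned int *table, int size, unsigned int bw)
{
	int i;

	for (i = 0; i < size; i++)
		if (bw <= table[i])
			break;
	/* Clamp to the last entry if no threshold was large enough. */
	if (i && i == size)
		i--;
	return i;
}

int main(void)
{
	unsigned int bw_vecs[] = { 0, 25000000, 100000000, 200000000 };

	/* 60MB/s fits under the 100MB/s entry -> vote index 2. */
	printf("vote index = %d\n", vote_for_bw(bw_vecs, 4, 60000000));
	return 0;
}
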
+
+/*
+ * This function must be called with the host lock acquired.
+ * The caller should also ensure that the msm bus client
+ * handle is not NULL.
+ */
+static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
+					     int vote,
+					     unsigned long *flags)
+{
+	struct sdhci_host *host =  platform_get_drvdata(msm_host->pdev);
+	int rc = 0;
+
+	WARN_ON(!flags);
+
+	if (vote != msm_host->msm_bus_vote.curr_vote) {
+		spin_unlock_irqrestore(&host->lock, *flags);
+		rc = msm_bus_scale_client_update_request(
+				msm_host->msm_bus_vote.client_handle, vote);
+		spin_lock_irqsave(&host->lock, *flags);
+		if (rc) {
+			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
+				mmc_hostname(host->mmc),
+				msm_host->msm_bus_vote.client_handle, vote, rc);
+			goto out;
+		}
+		msm_host->msm_bus_vote.curr_vote = vote;
+	}
+out:
 	return rc;
 }
 
 /*
- * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
- * This needs to be done for both tuning and enhanced_strobe mode.
- * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
- * fixed feedback clock is used.
+ * Internal work item that sets the msm bus bandwidth vote to 0.
  */
-static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
+static void sdhci_msm_bus_work(struct work_struct *work)
 {
-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	int ret;
+	struct sdhci_msm_host *msm_host;
+	struct sdhci_host *host;
+	unsigned long flags;
 
-	if (host->clock > CORE_FREQ_100MHZ &&
-	    (msm_host->tuning_done || ios->enhanced_strobe) &&
-	    !msm_host->calibration_done) {
-		ret = sdhci_msm_hs400_dll_calibration(host);
-		if (!ret)
-			msm_host->calibration_done = true;
-		else
-			pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
-			       mmc_hostname(host->mmc), ret);
-	}
-}
+	msm_host = container_of(work, struct sdhci_msm_host,
+				msm_bus_vote.vote_work.work);
+	host =  platform_get_drvdata(msm_host->pdev);
 
-static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
-					unsigned int uhs)
-{
-	struct mmc_host *mmc = host->mmc;
-	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	u16 ctrl_2;
-	u32 config;
-	const struct sdhci_msm_offset *msm_offset =
-					msm_host->offset;
+	if (!msm_host->msm_bus_vote.client_handle)
+		return;
 
-	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-	/* Select Bus Speed Mode for host */
-	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
-	switch (uhs) {
-	case MMC_TIMING_UHS_SDR12:
-		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
-		break;
-	case MMC_TIMING_UHS_SDR25:
-		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
-		break;
-	case MMC_TIMING_UHS_SDR50:
-		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
-		break;
-	case MMC_TIMING_MMC_HS400:
-	case MMC_TIMING_MMC_HS200:
-	case MMC_TIMING_UHS_SDR104:
-		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
-		break;
-	case MMC_TIMING_UHS_DDR50:
-	case MMC_TIMING_MMC_DDR52:
-		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
-		break;
-	}
-
-	/*
-	 * When clock frequency is less than 100MHz, the feedback clock must be
-	 * provided and DLL must not be used so that tuning can be skipped. To
-	 * provide feedback clock, the mode selection can be any value less
-	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
-	 */
-	if (host->clock <= CORE_FREQ_100MHZ) {
-		if (uhs == MMC_TIMING_MMC_HS400 ||
-		    uhs == MMC_TIMING_MMC_HS200 ||
-		    uhs == MMC_TIMING_UHS_SDR104)
-			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
-		/*
-		 * DLL is not required for clock <= 100MHz
-		 * Thus, make sure DLL it is disabled when not required
-		 */
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_dll_config);
-		config |= CORE_DLL_RST;
-		writel_relaxed(config, host->ioaddr +
-				msm_offset->core_dll_config);
-
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_dll_config);
-		config |= CORE_DLL_PDN;
-		writel_relaxed(config, host->ioaddr +
-				msm_offset->core_dll_config);
-
-		/*
-		 * The DLL needs to be restored and CDCLP533 recalibrated
-		 * when the clock frequency is set back to 400MHz.
-		 */
-		msm_host->calibration_done = false;
-	}
-
-	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
-		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
-	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
-
-	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
-		sdhci_msm_hs400(host, &mmc->ios);
-}
-
-static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
-{
-	init_waitqueue_head(&msm_host->pwr_irq_wait);
-}
-
-static inline void sdhci_msm_complete_pwr_irq_wait(
-		struct sdhci_msm_host *msm_host)
-{
-	wake_up(&msm_host->pwr_irq_wait);
+	spin_lock_irqsave(&host->lock, flags);
+	/* don't vote for 0 bandwidth if any request is in progress */
+	if (!host->mrq) {
+		sdhci_msm_bus_set_vote(msm_host,
+			msm_host->msm_bus_vote.min_bw_vote, &flags);
+	} else {
+		pr_warn("%s: %s: Transfer in progress, skipping bus voting to 0 bandwidth\n",
+			   mmc_hostname(host->mmc), __func__);
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
 /*
- * sdhci_msm_check_power_status API should be called when registers writes
- * which can toggle sdhci IO bus ON/OFF or change IO lines HIGH/LOW happens.
- * To what state the register writes will change the IO lines should be passed
- * as the argument req_type. This API will check whether the IO line's state
- * is already the expected state and will wait for power irq only if
- * power irq is expected to be trigerred based on the current IO line state
- * and expected IO line state.
+ * This function cancels any scheduled delayed work and sets the bus
+ * vote based on the bw (bandwidth) argument.
  */
-static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
+						unsigned int bw)
 {
+	int vote;
+	unsigned long flags;
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	bool done = false;
-	u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
-	const struct sdhci_msm_offset *msm_offset =
-					msm_host->offset;
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
 
-	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
-			mmc_hostname(host->mmc), __func__, req_type,
-			msm_host->curr_pwr_state, msm_host->curr_io_level);
-
-	/*
-	 * The power interrupt will not be generated for signal voltage
-	 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
-	 * Since sdhci-msm-v5, this bit has been removed and SW must consider
-	 * it as always set.
-	 */
-	if (!msm_host->mci_removed)
-		val = msm_host_readl(msm_host, host,
-				msm_offset->core_generics);
-	if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
-	    !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
-		return;
-	}
-
-	/*
-	 * The IRQ for request type IO High/LOW will be generated when -
-	 * there is a state change in 1.8V enable bit (bit 3) of
-	 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
-	 * which indicates 3.3V IO voltage. So, when MMC core layer tries
-	 * to set it to 3.3V before card detection happens, the
-	 * IRQ doesn't get triggered as there is no state change in this bit.
-	 * The driver already handles this case by changing the IO voltage
-	 * level to high as part of controller power up sequence. Hence, check
-	 * for host->pwr to handle a case where IO voltage high request is
-	 * issued even before controller power up.
-	 */
-	if ((req_type & REQ_IO_HIGH) && !host->pwr) {
-		pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
-				mmc_hostname(host->mmc), req_type);
-		return;
-	}
-	if ((req_type & msm_host->curr_pwr_state) ||
-			(req_type & msm_host->curr_io_level))
-		done = true;
-	/*
-	 * This is needed here to handle cases where register writes will
-	 * not change the current bus state or io level of the controller.
-	 * In this case, no power irq will be triggerred and we should
-	 * not wait.
-	 */
-	if (!done) {
-		if (!wait_event_timeout(msm_host->pwr_irq_wait,
-				msm_host->pwr_irq_flag,
-				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
-			dev_warn(&msm_host->pdev->dev,
-				 "%s: pwr_irq for req: (%d) timed out\n",
-				 mmc_hostname(host->mmc), req_type);
-	}
-	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
-			__func__, req_type);
+	cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
+	spin_lock_irqsave(&host->lock, flags);
+	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
+	sdhci_msm_bus_set_vote(msm_host, vote, &flags);
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
-static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
+#define MSM_MMC_BUS_VOTING_DELAY	200 /* msecs */
+
+/* This function queues a work item which will set the bandwidth requirement to 0 */
+static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
+{
+	unsigned long flags;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (msm_host->msm_bus_vote.min_bw_vote !=
+		msm_host->msm_bus_vote.curr_vote)
+		queue_delayed_work(system_wq,
+				   &msm_host->msm_bus_vote.vote_work,
+				   msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
+	spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
+				struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_bus_scale_pdata *bus_pdata;
+
+	struct sdhci_msm_bus_voting_data *data;
+	struct device *dev = &pdev->dev;
+
+	data = devm_kzalloc(dev,
+		sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
+	if (!data) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	data->bus_pdata = msm_bus_cl_get_pdata(pdev);
+	if (data->bus_pdata) {
+		rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
+				&data->bw_vecs, &data->bw_vecs_size, 0);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: Failed to get bus-bw-vectors-bps\n",
+				__func__);
+			goto out;
+		}
+		host->pdata->voting_data = data;
+	}
+	if (host->pdata->voting_data &&
+		host->pdata->voting_data->bus_pdata &&
+		host->pdata->voting_data->bw_vecs &&
+		host->pdata->voting_data->bw_vecs_size) {
+
+		bus_pdata = host->pdata->voting_data->bus_pdata;
+		host->msm_bus_vote.client_handle =
+				msm_bus_scale_register_client(bus_pdata);
+		if (!host->msm_bus_vote.client_handle) {
+			dev_err(&pdev->dev, "msm_bus_scale_register_client() failed\n");
+			rc = -EFAULT;
+			goto out;
+		}
+		/* cache the vote index for minimum and maximum bandwidth */
+		host->msm_bus_vote.min_bw_vote =
+				sdhci_msm_bus_get_vote_for_bw(host, 0);
+		host->msm_bus_vote.max_bw_vote =
+				sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
+	} else {
+		devm_kfree(dev, data);
+	}
+
+out:
+	return rc;
+}
+
+static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
+{
+	if (host->msm_bus_vote.client_handle)
+		msm_bus_scale_unregister_client(
+			host->msm_bus_vote.client_handle);
+}
+
+static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	const struct sdhci_msm_offset *msm_offset =
-					msm_host->offset;
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct mmc_ios *ios = &host->mmc->ios;
+	unsigned int bw;
 
-	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
+	if (!msm_host->msm_bus_vote.client_handle)
+		return;
+
+	bw = sdhci_get_bw_required(host, ios);
+	if (enable) {
+		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
+	} else {
+		/*
+		 * If clock gating is enabled, then remove the vote
+		 * immediately because clocks will be disabled only
+		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
+		 * additional delay is required to remove the bus vote.
+		 */
+#ifdef CONFIG_MMC_CLKGATE
+		if (host->mmc->clkgate_delay)
+			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+		else
+#endif
+			sdhci_msm_bus_queue_work(host);
+	}
+}
+
+/* Regulator utility functions */
+static int sdhci_msm_vreg_init_reg(struct device *dev,
+					struct sdhci_msm_reg_data *vreg)
+{
+	int ret = 0;
+
+	/* Check if the regulator is already initialized */
+	if (vreg->reg)
+		goto out;
+
+	/* Get the regulator handle */
+	vreg->reg = devm_regulator_get(dev, vreg->name);
+	if (IS_ERR(vreg->reg)) {
+		ret = PTR_ERR(vreg->reg);
+		pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
+			__func__, vreg->name, ret);
+		goto out;
+	}
+
+	if (regulator_count_voltages(vreg->reg) > 0) {
+		vreg->set_voltage_sup = true;
+		/* sanity check */
+		if (!vreg->high_vol_level || !vreg->hpm_uA) {
+			pr_err("%s: %s invalid constraints specified\n",
+			       __func__, vreg->name);
+			ret = -EINVAL;
+		}
+	}
+
+out:
+	return ret;
+}
+
+static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
+{
+	if (vreg->reg)
+		devm_regulator_put(vreg->reg);
+}
+
+static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
+						  *vreg, int uA_load)
+{
+	int ret = 0;
+
+	/*
+	 * regulators that do not support regulator_set_voltage also
+	 * do not support regulator_set_optimum_mode
+	 */
+	if (vreg->set_voltage_sup) {
+		ret = regulator_set_load(vreg->reg, uA_load);
+		if (ret < 0)
+			pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
+			       __func__, vreg->name, uA_load, ret);
+		else
+			/*
+			 * regulator_set_load() can return a non-zero
+			 * value even in the success case.
+			 */
+			ret = 0;
+	}
+	return ret;
+}
+
+static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
+					int min_uV, int max_uV)
+{
+	int ret = 0;
+
+	if (vreg->set_voltage_sup) {
+		ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
+		if (ret) {
+			pr_err("%s: regulator_set_voltage(%s) failed. min_uV=%d, max_uV=%d, ret=%d\n",
+			       __func__, vreg->name, min_uV, max_uV, ret);
+		}
+	}
+
+	return ret;
+}
+
+static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
+{
+	int ret = 0;
+
+	/* Put regulator in HPM (high power mode) */
+	ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
+	if (ret < 0)
+		return ret;
+
+	if (!vreg->is_enabled) {
+		/* Set voltage level */
+		ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
+						vreg->high_vol_level);
+		if (ret)
+			return ret;
+	}
+	ret = regulator_enable(vreg->reg);
+	if (ret) {
+		pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
+				__func__, vreg->name, ret);
+		return ret;
+	}
+	vreg->is_enabled = true;
+	return ret;
+}
+
+static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
+{
+	int ret = 0;
+
+	/* Never disable regulator marked as always_on */
+	if (vreg->is_enabled && !vreg->is_always_on) {
+		ret = regulator_disable(vreg->reg);
+		if (ret) {
+			pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
+				__func__, vreg->name, ret);
+			goto out;
+		}
+		vreg->is_enabled = false;
+
+		ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
+		if (ret < 0)
+			goto out;
+
+		/* Set min. voltage level to 0 */
+		ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
+		if (ret)
+			goto out;
+	} else if (vreg->is_enabled && vreg->is_always_on) {
+		if (vreg->lpm_sup) {
+			/* Put always_on regulator in LPM (low power mode) */
+			ret = sdhci_msm_vreg_set_optimum_mode(vreg,
+							      vreg->lpm_uA);
+			if (ret < 0)
+				goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
+			bool enable, bool is_init)
+{
+	int ret = 0, i;
+	struct sdhci_msm_slot_reg_data *curr_slot;
+	struct sdhci_msm_reg_data *vreg_table[2];
+
+	curr_slot = pdata->vreg_data;
+	if (!curr_slot) {
+		pr_debug("%s: vreg info unavailable, assuming the slot is powered by an always-on domain\n",
+			 __func__);
+		goto out;
+	}
+
+	vreg_table[0] = curr_slot->vdd_data;
+	vreg_table[1] = curr_slot->vdd_io_data;
+
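+	/* Apply the request to VDD first, then VDD-IO; stop on the first error. */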
+	for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
+		if (vreg_table[i]) {
+			if (enable)
+				ret = sdhci_msm_vreg_enable(vreg_table[i]);
+			else
+				ret = sdhci_msm_vreg_disable(vreg_table[i]);
+			if (ret)
+				goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+/* This init function should be called only once for each SDHC slot */
+static int sdhci_msm_vreg_init(struct device *dev,
+				struct sdhci_msm_pltfm_data *pdata,
+				bool is_init)
+{
+	int ret = 0;
+	struct sdhci_msm_slot_reg_data *curr_slot;
+	struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
+
+	curr_slot = pdata->vreg_data;
+	if (!curr_slot)
+		goto out;
+
+	curr_vdd_reg = curr_slot->vdd_data;
+	curr_vdd_io_reg = curr_slot->vdd_io_data;
+
+	if (!is_init)
+		/* Deregister all regulators from regulator framework */
+		goto vdd_io_reg_deinit;
+
+	/*
+	 * Get the regulator handle from voltage regulator framework
+	 * and then try to set the voltage level for the regulator
+	 */
+	if (curr_vdd_reg) {
+		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
+		if (ret)
+			goto out;
+	}
+	if (curr_vdd_io_reg) {
+		ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
+		if (ret)
+			goto vdd_reg_deinit;
+	}
+
+	if (ret)
+		dev_err(dev, "vreg reset failed (%d)\n", ret);
+	goto out;
+
+vdd_io_reg_deinit:
+	if (curr_vdd_io_reg)
+		sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
+vdd_reg_deinit:
+	if (curr_vdd_reg)
+		sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
+out:
+	return ret;
+}
+
+
+static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
+			enum vdd_io_level level,
+			unsigned int voltage_level)
+{
+	int ret = 0;
+	int set_level;
+	struct sdhci_msm_reg_data *vdd_io_reg;
+
+	if (!pdata->vreg_data)
+		return ret;
+
+	vdd_io_reg = pdata->vreg_data->vdd_io_data;
+	if (vdd_io_reg && vdd_io_reg->is_enabled) {
+		switch (level) {
+		case VDD_IO_LOW:
+			set_level = vdd_io_reg->low_vol_level;
+			break;
+		case VDD_IO_HIGH:
+			set_level = vdd_io_reg->high_vol_level;
+			break;
+		case VDD_IO_SET_LEVEL:
+			set_level = voltage_level;
+			break;
+		default:
+			pr_err("%s: invalid argument level = %d\n",
+					__func__, level);
+			ret = -EINVAL;
+			return ret;
+		}
+		ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
+				set_level);
+	}
+	return ret;
+}
+
+/*
+ * Acquire spin-lock host->lock before calling this function
+ */
+static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
+					      bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	if (enable && !msm_host->is_sdiowakeup_enabled)
+		enable_irq(msm_host->pdata->sdiowakeup_irq);
+	else if (!enable && msm_host->is_sdiowakeup_enabled)
+		disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
+	else
+		dev_warn(&msm_host->pdev->dev, "%s: wakeup to config: %d curr: %d\n",
+			__func__, enable, msm_host->is_sdiowakeup_enabled);
+	msm_host->is_sdiowakeup_enabled = enable;
+}
+
+static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
+{
+	struct sdhci_host *host = (struct sdhci_host *)data;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	unsigned long flags;
+
+	pr_debug("%s: irq (%d) received\n", __func__, irq);
+
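+	/*
+	 * Disable the wakeup IRQ under host->lock (as required by
+	 * sdhci_msm_cfg_sdiowakeup_gpio_irq) and note that an SDIO
+	 * wakeup event is pending processing.
+	 */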
+	spin_lock_irqsave(&host->lock, flags);
+	sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+	spin_unlock_irqrestore(&host->lock, flags);
+	msm_host->sdio_pending_processing = true;
+
+	return IRQ_HANDLED;
+}
+
+void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	unsigned int irq_flags = 0;
+	struct irq_desc *pwr_irq_desc = irq_to_desc(msm_host->pwr_irq);
+
+	if (pwr_irq_desc)
+		irq_flags = ACCESS_PRIVATE(pwr_irq_desc->irq_data.common,
+				state_use_accessors);
+
+	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x, pwr isr state=0x%x\n",
 		mmc_hostname(host->mmc),
-		msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
-		msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
-		msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_PWRCTL_STATUS),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_PWRCTL_MASK),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
+
+	MMC_TRACE(host->mmc,
+		"%s: Sts: 0x%08x | Mask: 0x%08x | Ctrl: 0x%08x, pwr isr state=0x%x\n",
+		__func__,
+		sdhci_msm_readb_relaxed(host,
+			msm_host_offset->CORE_PWRCTL_STATUS),
+		sdhci_msm_readb_relaxed(host,
+			msm_host_offset->CORE_PWRCTL_MASK),
+		sdhci_msm_readb_relaxed(host,
+			msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
 }
 
-static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
+static int sdhci_msm_clear_pwrctl_status(struct sdhci_host *host, u8 value)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	u32 irq_status, irq_ack = 0;
-	int retry = 10;
-	u32 pwr_state = 0, io_level = 0;
-	u32 config;
-	const struct sdhci_msm_offset *msm_offset = msm_host->offset;
-
-	irq_status = msm_host_readl(msm_host, host,
-			msm_offset->core_pwrctl_status);
-	irq_status &= INT_MASK;
-
-	msm_host_writel(msm_host, irq_status, host,
-			msm_offset->core_pwrctl_clear);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset = msm_host->offset;
+	int ret = 0, retry = 10;
 
 	/*
 	 * There is a rare HW scenario where the first clear pulse could be
@@ -1334,373 +2662,2046 @@
 	 * sure status register is cleared. Otherwise, this will result in
 	 * a spurious power IRQ resulting in system instability.
 	 */
-	while (irq_status & msm_host_readl(msm_host, host,
-				msm_offset->core_pwrctl_status)) {
+	do {
 		if (retry == 0) {
 			pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
-					mmc_hostname(host->mmc), irq_status);
+				mmc_hostname(host->mmc), value);
 			sdhci_msm_dump_pwr_ctrl_regs(host);
 			WARN_ON(1);
+			ret = -EBUSY;
 			break;
 		}
-		msm_host_writel(msm_host, irq_status, host,
-			msm_offset->core_pwrctl_clear);
+
+		/*
+		 * Clear the PWRCTL_STATUS interrupt bits by writing to the
+		 * corresponding bits in the PWRCTL_CLEAR register.
+		 */
+		sdhci_msm_writeb_relaxed(value, host,
+				msm_host_offset->CORE_PWRCTL_CLEAR);
+		/*
+		 * SDHC has core_mem and hc_mem device memory and these memory
+		 * addresses do not fall within 1KB region. Hence, any update to
+		 * core_mem address space would require an mb() to ensure this
+		 * gets completed before its next update to registers within
+		 * hc_mem.
+		 */
+		mb();
 		retry--;
 		udelay(10);
-	}
+	} while (value & sdhci_msm_readb_relaxed(host,
+				msm_host_offset->CORE_PWRCTL_STATUS));
 
-	/* Handle BUS ON/OFF*/
-	if (irq_status & CORE_PWRCTL_BUS_ON) {
-		pwr_state = REQ_BUS_ON;
-		io_level = REQ_IO_HIGH;
-		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
-	}
-	if (irq_status & CORE_PWRCTL_BUS_OFF) {
-		pwr_state = REQ_BUS_OFF;
-		io_level = REQ_IO_LOW;
-		irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
-	}
-	/* Handle IO LOW/HIGH */
-	if (irq_status & CORE_PWRCTL_IO_LOW) {
-		io_level = REQ_IO_LOW;
-		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
-	}
-	if (irq_status & CORE_PWRCTL_IO_HIGH) {
-		io_level = REQ_IO_HIGH;
-		irq_ack |= CORE_PWRCTL_IO_SUCCESS;
-	}
-
-	/*
-	 * The driver has to acknowledge the interrupt, switch voltages and
-	 * report back if it succeded or not to this register. The voltage
-	 * switches are handled by the sdhci core, so just report success.
-	 */
-	msm_host_writel(msm_host, irq_ack, host,
-			msm_offset->core_pwrctl_ctl);
-
-	/*
-	 * If we don't have info regarding the voltage levels supported by
-	 * regulators, don't change the IO PAD PWR SWITCH.
-	 */
-	if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
-		u32 new_config;
-		/*
-		 * We should unset IO PAD PWR switch only if the register write
-		 * can set IO lines high and the regulator also switches to 3 V.
-		 * Else, we should keep the IO PAD PWR switch set.
-		 * This is applicable to certain targets where eMMC vccq supply
-		 * is only 1.8V. In such targets, even during REQ_IO_HIGH, the
-		 * IO PAD PWR switch must be kept set to reflect actual
-		 * regulator voltage. This way, during initialization of
-		 * controllers with only 1.8V, we will set the IO PAD bit
-		 * without waiting for a REQ_IO_LOW.
-		 */
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_vendor_spec);
-		new_config = config;
-
-		if ((io_level & REQ_IO_HIGH) &&
-				(msm_host->caps_0 & CORE_3_0V_SUPPORT))
-			new_config &= ~CORE_IO_PAD_PWR_SWITCH;
-		else if ((io_level & REQ_IO_LOW) ||
-				(msm_host->caps_0 & CORE_1_8V_SUPPORT))
-			new_config |= CORE_IO_PAD_PWR_SWITCH;
-
-		if (config ^ new_config)
-			writel_relaxed(new_config, host->ioaddr +
-					msm_offset->core_vendor_spec);
-	}
-
-	if (pwr_state)
-		msm_host->curr_pwr_state = pwr_state;
-	if (io_level)
-		msm_host->curr_io_level = io_level;
-
-	pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
-		mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
-		irq_ack);
+	return ret;
 }
 
 static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
 {
 	struct sdhci_host *host = (struct sdhci_host *)data;
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u8 irq_status = 0;
+	u8 irq_ack = 0;
+	int ret = 0;
+	int pwr_state = 0, io_level = 0;
+	unsigned long flags;
 
-	sdhci_msm_handle_pwr_irq(host, irq);
-	msm_host->pwr_irq_flag = 1;
-	sdhci_msm_complete_pwr_irq_wait(msm_host);
+	irq_status = sdhci_msm_readb_relaxed(host,
+		msm_host_offset->CORE_PWRCTL_STATUS);
 
+	pr_debug("%s: Received IRQ(%d), status=0x%x\n",
+		mmc_hostname(msm_host->mmc), irq, irq_status);
+
+	sdhci_msm_clear_pwrctl_status(host, irq_status);
+
+	/* Handle BUS ON/OFF */
+	if (irq_status & CORE_PWRCTL_BUS_ON) {
+		ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
+		if (!ret) {
+			ret = sdhci_msm_setup_pins(msm_host->pdata, true);
+			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
+					VDD_IO_HIGH, 0);
+		}
+		if (ret)
+			irq_ack |= CORE_PWRCTL_BUS_FAIL;
+		else
+			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+
+		pwr_state = REQ_BUS_ON;
+		io_level = REQ_IO_HIGH;
+	}
+	if (irq_status & CORE_PWRCTL_BUS_OFF) {
+		if (msm_host->pltfm_init_done)
+			ret = sdhci_msm_setup_vreg(msm_host->pdata,
+					false, false);
+		if (!ret) {
+			ret = sdhci_msm_setup_pins(msm_host->pdata, false);
+			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
+					VDD_IO_LOW, 0);
+		}
+		if (ret)
+			irq_ack |= CORE_PWRCTL_BUS_FAIL;
+		else
+			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+
+		pwr_state = REQ_BUS_OFF;
+		io_level = REQ_IO_LOW;
+	}
+	/* Handle IO LOW/HIGH */
+	if (irq_status & CORE_PWRCTL_IO_LOW) {
+		/* Switch voltage Low */
+		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
+		if (ret)
+			irq_ack |= CORE_PWRCTL_IO_FAIL;
+		else
+			irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+
+		io_level = REQ_IO_LOW;
+	}
+	if (irq_status & CORE_PWRCTL_IO_HIGH) {
+		/* Switch voltage High */
+		ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
+		if (ret)
+			irq_ack |= CORE_PWRCTL_IO_FAIL;
+		else
+			irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+
+		io_level = REQ_IO_HIGH;
+	}
+
+	/* ACK status to the core */
+	sdhci_msm_writeb_relaxed(irq_ack, host,
+			msm_host_offset->CORE_PWRCTL_CTL);
+	/*
+	 * SDHC has core_mem and hc_mem device memory and these memory
+	 * addresses do not fall within 1KB region. Hence, any update to
+	 * core_mem address space would require an mb() to ensure this gets
+	 * completed before its next update to registers within hc_mem.
+	 */
+	mb();
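+	/*
+	 * Update the IO PAD power switch: clear it only when the IO lines
+	 * can actually run at 3V; keep it set for 1.8V-only operation.
+	 */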
+	if ((io_level & REQ_IO_HIGH) &&
+			(msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
+			!msm_host->core_3_0v_support)
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC) &
+				~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC);
+	else if ((io_level & REQ_IO_LOW) ||
+			(msm_host->caps_0 & CORE_1_8V_SUPPORT))
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC) |
+				CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC);
+	/*
+	 * SDHC has core_mem and hc_mem device memory and these memory
+	 * addresses do not fall within 1KB region. Hence, any update to
+	 * core_mem address space would require an mb() to ensure this gets
+	 * completed before its next update to registers within hc_mem.
+	 */
+	mb();
+
+	pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
+		mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
+	spin_lock_irqsave(&host->lock, flags);
+	if (pwr_state)
+		msm_host->curr_pwr_state = pwr_state;
+	if (io_level)
+		msm_host->curr_io_level = io_level;
+	complete(&msm_host->pwr_irq_completion);
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	return IRQ_HANDLED;
 }
 
+static ssize_t
+show_polling(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	int poll;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", poll);
+}
+
+static ssize_t
+store_polling(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	u32 value;
+	unsigned long flags;
+
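+	/*
+	 * A non-zero value enables card-detect polling and triggers an
+	 * immediate rescan; zero disables polling.
+	 */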
+	if (!kstrtou32(buf, 0, &value)) {
+		spin_lock_irqsave(&host->lock, flags);
+		if (value) {
+			host->mmc->caps |= MMC_CAP_NEEDS_POLL;
+			mmc_detect_change(host->mmc, 0);
+		} else {
+			host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+		}
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+	return count;
+}
+
+static ssize_t
+show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			msm_host->msm_bus_vote.is_max_bw_needed);
+}
+
+static ssize_t
+store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	uint32_t value;
+	unsigned long flags;
+
+	if (!kstrtou32(buf, 0, &value)) {
+		spin_lock_irqsave(&host->lock, flags);
+		msm_host->msm_bus_vote.is_max_bw_needed = !!value;
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+	return count;
+}
+
+static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	unsigned long flags;
+	bool done = false;
+	u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
+
+	spin_lock_irqsave(&host->lock, flags);
+	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
+			mmc_hostname(host->mmc), __func__, req_type,
+			msm_host->curr_pwr_state, msm_host->curr_io_level);
+	if (!msm_host->mci_removed)
+		io_sig_sts = sdhci_msm_readl_relaxed(host,
+				msm_host_offset->CORE_GENERICS);
+
+	/*
+	 * The IRQ for an IO High/Low request is generated only when:
+	 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW, and
+	 * 2. there is a state change in the 1.8V enable bit (bit 3) of the
+	 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0,
+	 * which indicates 3.3V IO voltage. So, when the MMC core layer
+	 * tries to set it to 3.3V before card detection happens, the IRQ
+	 * doesn't get triggered as there is no state change in this bit.
+	 * The driver already handles this case by changing the IO voltage
+	 * level to high as part of the controller power-up sequence. Hence,
+	 * check host->pwr to handle the case where an IO voltage high
+	 * request is issued even before controller power-up.
+	 */
+	if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
+		if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
+				((req_type & REQ_IO_HIGH) && !host->pwr)) {
+			pr_debug("%s: do not wait for power IRQ that never comes\n",
+					mmc_hostname(host->mmc));
+			spin_unlock_irqrestore(&host->lock, flags);
+			return;
+		}
+	}
+
+	if ((req_type & msm_host->curr_pwr_state) ||
+			(req_type & msm_host->curr_io_level))
+		done = true;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	/*
+	 * This is needed to handle the case where the IRQ fires even
+	 * before this function is called, so that the done counter of
+	 * the completion gets reset. Otherwise, the next call to
+	 * wait_for_completion() returns immediately without actually
+	 * waiting for the IRQ to be handled.
+	 */
+	if (done)
+		init_completion(&msm_host->pwr_irq_completion);
+	else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
+				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) {
+		__WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
+					mmc_hostname(host->mmc), req_type);
+		MMC_TRACE(host->mmc,
+			"%s: request(%d) timed out waiting for pwr_irq\n",
+			__func__, req_type);
+		sdhci_msm_dump_pwr_ctrl_regs(host);
+	}
+	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
+			__func__, req_type);
+}
+
+static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u32 config = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
+
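+	/*
+	 * CDR_EN and its external override CDR_EXT_EN are programmed as
+	 * opposites: either the internal clock-data-recovery circuit or
+	 * the external override is in effect, never both.
+	 */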
+	if (enable) {
+		config |= CORE_CDR_EN;
+		config &= ~CORE_CDR_EXT_EN;
+		writel_relaxed(config, host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG);
+	} else {
+		config &= ~CORE_CDR_EN;
+		config |= CORE_CDR_EXT_EN;
+		writel_relaxed(config, host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG);
+	}
+}
+
+static unsigned int sdhci_msm_max_segs(void)
+{
+	return SDHCI_MSM_MAX_SEGMENTS;
+}
+
+static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	return msm_host->pdata->sup_clk_table[0];
+}
+
 static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	struct clk *core_clk = msm_host->bulk_clks[0].clk;
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int max_clk_index = msm_host->pdata->sup_clk_cnt;
 
-	return clk_round_rate(core_clk, ULONG_MAX);
+	return msm_host->pdata->sup_clk_table[max_clk_index - 1];
 }
 
-static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
-{
-	return SDHCI_MSM_MIN_CLOCK;
-}
-
-/**
- * __sdhci_msm_set_clock - sdhci_msm clock control.
- *
- * Description:
- * MSM controller does not use internal divider and
- * instead directly control the GCC clock as per
- * HW recommendation.
- **/
-static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
-{
-	u16 clk;
-	/*
-	 * Keep actual_clock as zero -
-	 * - since there is no divider used so no need of having actual_clock.
-	 * - MSM controller uses SDCLK for data timeout calculation. If
-	 *   actual_clock is zero, host->clock is taken for calculation.
-	 */
-	host->mmc->actual_clock = 0;
-
-	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
-
-	if (clock == 0)
-		return;
-
-	/*
-	 * MSM controller do not use clock divider.
-	 * Thus read SDHCI_CLOCK_CONTROL and only enable
-	 * clock with no divider value programmed.
-	 */
-	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
-	sdhci_enable_clk(host, clk);
-}
-
-/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
-static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
+						u32 req_clk)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	unsigned int sel_clk = -1;
+	unsigned char cnt;
+
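+	/*
+	 * sup_clk_table is sorted in ascending order; return the highest
+	 * supported rate that does not exceed req_clk, clamping requests
+	 * below the minimum to the minimum supported rate.
+	 */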
+	if (req_clk < sdhci_msm_get_min_clock(host)) {
+		sel_clk = sdhci_msm_get_min_clock(host);
+		return sel_clk;
+	}
+
+	for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
+		if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
+			break;
+		} else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
+			sel_clk = msm_host->pdata->sup_clk_table[cnt];
+			break;
+		}
+		sel_clk = msm_host->pdata->sup_clk_table[cnt];
+	}
+	return sel_clk;
+}
+
+static long sdhci_msm_get_bus_aggr_clk_rate(struct sdhci_host *host,
+						u32 apps_clk)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	long sel_clk = -1;
+	unsigned char cnt;
+
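+	/*
+	 * bus_clk_table entries map 1:1 onto sup_clk_table entries; pick
+	 * the bus-aggr rate paired with the highest apps clock rate that
+	 * does not exceed apps_clk.
+	 */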
+	if (msm_host->pdata->bus_clk_cnt != msm_host->pdata->sup_clk_cnt) {
+		pr_err("%s: %s: mismatch between bus_clk_cnt(%u) and apps_clk_cnt(%u)\n",
+				mmc_hostname(host->mmc), __func__,
+				(unsigned int)msm_host->pdata->bus_clk_cnt,
+				(unsigned int)msm_host->pdata->sup_clk_cnt);
+		return msm_host->pdata->bus_clk_table[0];
+	}
+	if (apps_clk == sdhci_msm_get_min_clock(host)) {
+		sel_clk = msm_host->pdata->bus_clk_table[0];
+		return sel_clk;
+	}
+
+	for (cnt = 0; cnt < msm_host->pdata->bus_clk_cnt; cnt++) {
+		if (msm_host->pdata->sup_clk_table[cnt] > apps_clk)
+			break;
+		sel_clk = msm_host->pdata->bus_clk_table[cnt];
+	}
+	return sel_clk;
+}
+
+static void sdhci_msm_registers_save(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+
+	if (!msm_host->regs_restore.is_supported)
+		return;
+
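+	/*
+	 * Snapshot the vendor-specific and standard host registers so
+	 * they can be reprogrammed once the controller loses its state
+	 * across a power collapse.
+	 */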
+	msm_host->regs_restore.vendor_func = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_VENDOR_SPEC);
+	msm_host->regs_restore.vendor_pwrctl_mask =
+		readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_PWRCTL_MASK);
+	msm_host->regs_restore.vendor_func2 =
+		readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+	msm_host->regs_restore.vendor_func3 =
+		readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_VENDOR_SPEC3);
+	msm_host->regs_restore.hc_2c_2e =
+		sdhci_readl(host, SDHCI_CLOCK_CONTROL);
+	msm_host->regs_restore.hc_3c_3e =
+		sdhci_readl(host, SDHCI_ACMD12_ERR);
+	msm_host->regs_restore.vendor_pwrctl_ctl =
+		readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_PWRCTL_CTL);
+	msm_host->regs_restore.hc_38_3a =
+		sdhci_readl(host, SDHCI_SIGNAL_ENABLE);
+	msm_host->regs_restore.hc_34_36 =
+		sdhci_readl(host, SDHCI_INT_ENABLE);
+	msm_host->regs_restore.hc_28_2a =
+		sdhci_readl(host, SDHCI_HOST_CONTROL);
+	msm_host->regs_restore.vendor_caps_0 =
+		readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+	msm_host->regs_restore.hc_caps_1 =
+		sdhci_readl(host, SDHCI_CAPABILITIES_1);
+	msm_host->regs_restore.testbus_config = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_TESTBUS_CONFIG);
+	msm_host->regs_restore.dll_config = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG);
+	msm_host->regs_restore.dll_config2 = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG_2);
+	msm_host->regs_restore.dll_config3 = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_CONFIG_3);
+	msm_host->regs_restore.dll_usr_ctl = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_DLL_USR_CTL);
+
+	msm_host->regs_restore.is_valid = true;
+
+	pr_debug("%s: %s: registers saved. PWRCTL_MASK = 0x%x\n",
+		mmc_hostname(host->mmc), __func__,
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_PWRCTL_MASK));
+}
+
+static void sdhci_msm_registers_restore(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	u8 irq_status;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	struct mmc_ios ios = host->mmc->ios;
+
+	if (!msm_host->regs_restore.is_supported ||
+		!msm_host->regs_restore.is_valid)
+		return;
+
+	writel_relaxed(0, host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
+	writel_relaxed(msm_host->regs_restore.vendor_func, host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC);
+	writel_relaxed(msm_host->regs_restore.vendor_func2,
+			host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+	writel_relaxed(msm_host->regs_restore.vendor_func3,
+			host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC3);
+	sdhci_writel(host, msm_host->regs_restore.hc_2c_2e,
+			SDHCI_CLOCK_CONTROL);
+	sdhci_writel(host, msm_host->regs_restore.hc_3c_3e,
+			SDHCI_ACMD12_ERR);
+	sdhci_writel(host, msm_host->regs_restore.hc_38_3a,
+			SDHCI_SIGNAL_ENABLE);
+	sdhci_writel(host, msm_host->regs_restore.hc_34_36,
+			SDHCI_INT_ENABLE);
+	sdhci_writel(host, msm_host->regs_restore.hc_28_2a,
+			SDHCI_HOST_CONTROL);
+	writel_relaxed(msm_host->regs_restore.vendor_caps_0,
+			host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+	sdhci_writel(host, msm_host->regs_restore.hc_caps_1,
+			SDHCI_CAPABILITIES_1);
+	writel_relaxed(msm_host->regs_restore.testbus_config, host->ioaddr +
+			msm_host_offset->CORE_TESTBUS_CONFIG);
+	msm_host->regs_restore.is_valid = false;
+
+	/*
+	 * Clear the PWRCTL_STATUS register.
+	 * There is a rare HW scenario where the first clear pulse could be
+	 * lost when actual reset and clear/read of status register is
+	 * happening at a time. Hence, retry for at least 10 times to make
+	 * sure status register is cleared. Otherwise, this will result in
+	 * a spurious power IRQ resulting in system instability.
+	 */
+	irq_status = sdhci_msm_readb_relaxed(host,
+				msm_host_offset->CORE_PWRCTL_STATUS);
+
+	sdhci_msm_clear_pwrctl_status(host, irq_status);
+
+	writel_relaxed(msm_host->regs_restore.vendor_pwrctl_ctl,
+			host->ioaddr + msm_host_offset->CORE_PWRCTL_CTL);
+	writel_relaxed(msm_host->regs_restore.vendor_pwrctl_mask,
+			host->ioaddr + msm_host_offset->CORE_PWRCTL_MASK);
+
+	if (((ios.timing == MMC_TIMING_MMC_HS400) ||
+			(ios.timing == MMC_TIMING_MMC_HS200) ||
+			(ios.timing == MMC_TIMING_UHS_SDR104))
+			&& (ios.clock > CORE_FREQ_100MHZ)) {
+		writel_relaxed(msm_host->regs_restore.dll_config2,
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
+		writel_relaxed(msm_host->regs_restore.dll_config3,
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_3);
+		writel_relaxed(msm_host->regs_restore.dll_usr_ctl,
+			host->ioaddr + msm_host_offset->CORE_DLL_USR_CTL);
+		writel_relaxed(msm_host->regs_restore.dll_config &
+			~(CORE_DLL_RST | CORE_DLL_PDN),
+			host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+
+		msm_init_cm_dll(host, DLL_INIT_FROM_CX_COLLAPSE_EXIT);
+		msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+	}
+
+	pr_debug("%s: %s: registers restored. PWRCTL_MASK = 0x%x\n",
+		mmc_hostname(host->mmc), __func__,
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_PWRCTL_MASK));
+}
+
+static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int rc = 0;
+
+	if (atomic_read(&msm_host->controller_clock))
+		return 0;
+
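+	/* Vote for bus bandwidth before turning on the interface clocks. */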
+	sdhci_msm_bus_voting(host, 1);
+
+	if (!IS_ERR(msm_host->pclk)) {
+		rc = clk_prepare_enable(msm_host->pclk);
+		if (rc) {
+			pr_err("%s: %s: failed to enable the pclk with error %d\n",
+			       mmc_hostname(host->mmc), __func__, rc);
+			goto remove_vote;
+		}
+	}
+
+	if (!IS_ERR(msm_host->bus_aggr_clk)) {
+		rc = clk_prepare_enable(msm_host->bus_aggr_clk);
+		if (rc) {
+			pr_err("%s: %s: failed to enable the bus aggr clk with error %d\n",
+			       mmc_hostname(host->mmc), __func__, rc);
+			goto disable_pclk;
+		}
+	}
+
+	rc = clk_prepare_enable(msm_host->clk);
+	if (rc) {
+		pr_err("%s: %s: failed to enable the host-clk with error %d\n",
+		       mmc_hostname(host->mmc), __func__, rc);
+		goto disable_bus_aggr_clk;
+	}
+
+	if (!IS_ERR(msm_host->ice_clk)) {
+		rc = clk_prepare_enable(msm_host->ice_clk);
+		if (rc) {
+			pr_err("%s: %s: failed to enable the ice-clk with error %d\n",
+				mmc_hostname(host->mmc), __func__, rc);
+			goto disable_host_clk;
+		}
+	}
+	atomic_set(&msm_host->controller_clock, 1);
+	pr_debug("%s: %s: enabled controller clock\n",
+			mmc_hostname(host->mmc), __func__);
+	sdhci_msm_registers_restore(host);
+	goto out;
+
+disable_host_clk:
+	if (!IS_ERR(msm_host->clk))
+		clk_disable_unprepare(msm_host->clk);
+disable_bus_aggr_clk:
+	if (!IS_ERR(msm_host->bus_aggr_clk))
+		clk_disable_unprepare(msm_host->bus_aggr_clk);
+disable_pclk:
+	if (!IS_ERR(msm_host->pclk))
+		clk_disable_unprepare(msm_host->pclk);
+remove_vote:
+	if (msm_host->msm_bus_vote.client_handle)
+		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+out:
+	return rc;
+}
+
+static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	if (atomic_read(&msm_host->controller_clock)) {
+		sdhci_msm_registers_save(host);
+		if (!IS_ERR(msm_host->clk))
+			clk_disable_unprepare(msm_host->clk);
+		if (!IS_ERR(msm_host->ice_clk))
+			clk_disable_unprepare(msm_host->ice_clk);
+		if (!IS_ERR(msm_host->bus_aggr_clk))
+			clk_disable_unprepare(msm_host->bus_aggr_clk);
+		if (!IS_ERR(msm_host->pclk))
+			clk_disable_unprepare(msm_host->pclk);
+		sdhci_msm_bus_voting(host, 0);
+		atomic_set(&msm_host->controller_clock, 0);
+		pr_debug("%s: %s: disabled controller clock\n",
+			mmc_hostname(host->mmc), __func__);
+	}
+}
+
+static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int rc = 0;
+
+	if (enable && !atomic_read(&msm_host->clks_on)) {
+		pr_debug("%s: request to enable clocks\n",
+				mmc_hostname(host->mmc));
+
+		/*
+		 * The bus width or the clock rate might have changed
+		 * since the controller clocks were enabled; update the
+		 * bus vote in that case.
+		 */
+		if (atomic_read(&msm_host->controller_clock))
+			sdhci_msm_bus_voting(host, 1);
+
+		rc = sdhci_msm_enable_controller_clock(host);
+		if (rc)
+			goto remove_vote;
+
+		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
+			rc = clk_prepare_enable(msm_host->bus_clk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto disable_controller_clk;
+			}
+		}
+		if (!IS_ERR(msm_host->ff_clk)) {
+			rc = clk_prepare_enable(msm_host->ff_clk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto disable_bus_clk;
+			}
+		}
+		if (!IS_ERR(msm_host->sleep_clk)) {
+			rc = clk_prepare_enable(msm_host->sleep_clk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto disable_ff_clk;
+			}
+		}
+		/*
+		 * SDHC has core_mem and hc_mem device memory and these memory
+		 * addresses do not fall within 1KB region. Hence, any update to
+		 * core_mem address space would require an mb() to ensure this
+		 * gets completed before its next update to registers within
+		 * hc_mem.
+		 */
+		mb();
+
+	} else if (!enable && atomic_read(&msm_host->clks_on)) {
+		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+		/*
+		 * SDHC has core_mem and hc_mem device memory and these memory
+		 * addresses do not fall within 1KB region. Hence, any update
+		 * to core_mem address space would require an mb() to ensure
+		 * this gets completed before its next update to registers
+		 * within hc_mem.
+		 */
+		mb();
+		/*
+		 * During 1.8V signal switching the clock source must
+		 * still be ON as it requires accessing SDHC
+		 * registers (SDHCi host control2 register bit 3 must
+		 * be written and polled after stopping the SDCLK).
+		 */
+		if (host->mmc->card_clock_off)
+			return 0;
+		pr_debug("%s: request to disable clocks\n",
+				mmc_hostname(host->mmc));
+		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
+			clk_disable_unprepare(msm_host->sleep_clk);
+		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
+			clk_disable_unprepare(msm_host->ff_clk);
+		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
+			clk_disable_unprepare(msm_host->bus_clk);
+		sdhci_msm_disable_controller_clock(host);
+	}
+	atomic_set(&msm_host->clks_on, enable);
+	goto out;
+disable_ff_clk:
+	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
+		clk_disable_unprepare(msm_host->ff_clk);
+disable_bus_clk:
+	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
+		clk_disable_unprepare(msm_host->bus_clk);
+disable_controller_clk:
+	if (!IS_ERR_OR_NULL(msm_host->clk))
+		clk_disable_unprepare(msm_host->clk);
+	if (!IS_ERR(msm_host->ice_clk))
+		clk_disable_unprepare(msm_host->ice_clk);
+	if (!IS_ERR_OR_NULL(msm_host->bus_aggr_clk))
+		clk_disable_unprepare(msm_host->bus_aggr_clk);
+	if (!IS_ERR_OR_NULL(msm_host->pclk))
+		clk_disable_unprepare(msm_host->pclk);
+	atomic_set(&msm_host->controller_clock, 0);
+remove_vote:
+	if (msm_host->msm_bus_vote.client_handle)
+		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+out:
+	return rc;
+}
+
+static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	int rc;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	struct mmc_card *card = host->mmc->card;
+	struct mmc_ios	curr_ios = host->mmc->ios;
+	u32 sup_clock, ddr_clock, dll_lock;
+	long bus_clk_rate;
+	bool curr_pwrsave;
 
 	if (!clock) {
-		msm_host->clk_rate = clock;
+		/*
+		 * Disable pwrsave to ensure the clock is not auto-gated
+		 * until the rate is above 400 kHz (i.e. until
+		 * initialization completes).
+		 */
+		writel_relaxed(readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) &
+			~CORE_CLK_PWRSAVE, host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC);
+		sdhci_msm_prepare_clocks(host, false);
+		host->clock = clock;
 		goto out;
 	}
 
-	sdhci_msm_hc_select_mode(host);
+	rc = sdhci_msm_prepare_clocks(host, true);
+	if (rc)
+		goto out;
 
-	msm_set_clock_rate_for_bus_mode(host, clock);
+	curr_pwrsave = !!(readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
+	if ((clock > 400000) &&
+	    !curr_pwrsave && card && mmc_host_may_gate_card(card))
+		writel_relaxed(readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC)
+				| CORE_CLK_PWRSAVE, host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC);
+	/*
+	 * Disable pwrsave for a newly added card if it doesn't allow
+	 * clock gating.
+	 */
+	else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
+		writel_relaxed(readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC)
+				& ~CORE_CLK_PWRSAVE, host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC);
+
+	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
+	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
+		(curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
+		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
+		/*
+		 * The SDHC requires the internal clock frequency to be double
+		 * the actual clock that will be set for DDR mode. The
+		 * controller uses the faster clock (100/400 MHz) for some of
+		 * its parts and sends the actual required clock
+		 * (50/200 MHz) to the card.
+		 */
+		ddr_clock = clock * 2;
+		sup_clock = sdhci_msm_get_sup_clk_rate(host,
+				ddr_clock);
+	}
+
+	/*
+	 * In general all timing modes are controlled via UHS mode select in
+	 * Host Control2 register. The eMMC-specific HS200/HS400 modes don't
+	 * have their respective entries defined there, hence we use these values.
+	 *
+	 * HS200 - SDR104 (Since they both are equivalent in functionality)
+	 * HS400 - This involves multiple configurations
+	 *		Initially SDR104 - when tuning is required as HS200
+	 *		Then when switching to DDR @ 400MHz (HS400) we use
+	 *		the vendor specific HC_SELECT_IN to control the mode.
+	 *
+	 * In addition to controlling the modes we also need to select the
+	 * correct input clock for DLL depending on the mode.
+	 *
+	 * HS400 - divided clock (free running MCLK/2)
+	 * All other modes - default (free running MCLK)
+	 */
+	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
+		/* Select the divided clock (free running MCLK/2) */
+		writel_relaxed(((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC)
+				& ~CORE_HC_MCLK_SEL_MASK)
+				| CORE_HC_MCLK_SEL_HS400), host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC);
+		/*
+		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
+		 * register
+		 */
+		if ((msm_host->tuning_done ||
+				(card && mmc_card_strobe(card) &&
+				 msm_host->enhanced_strobe)) &&
+				!msm_host->calibration_done) {
+			/*
+			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
+			 * field in VENDOR_SPEC_FUNC
+			 */
+			writel_relaxed((readl_relaxed(host->ioaddr +
+					msm_host_offset->CORE_VENDOR_SPEC)
+					| CORE_HC_SELECT_IN_HS400
+					| CORE_HC_SELECT_IN_EN), host->ioaddr +
+					msm_host_offset->CORE_VENDOR_SPEC);
+		}
+		if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
+			/*
+			 * Poll on the DLL_LOCK and DDR_DLL_LOCK bits in
+			 * CORE_DLL_STATUS to be set. They should get set
+			 * within 15 us at 200 MHz.
+			 */
+			rc = readl_poll_timeout(host->ioaddr +
+					msm_host_offset->CORE_DLL_STATUS,
+					dll_lock, (dll_lock & (CORE_DLL_LOCK |
+					CORE_DDR_DLL_LOCK)), 10, 1000);
+			if (rc == -ETIMEDOUT)
+				pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
+						mmc_hostname(host->mmc),
+						dll_lock);
+		}
+	} else {
+		if (!msm_host->use_cdclp533)
+			/* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
+			writel_relaxed((readl_relaxed(host->ioaddr +
+					msm_host_offset->CORE_VENDOR_SPEC3)
+					& ~CORE_PWRSAVE_DLL), host->ioaddr +
+					msm_host_offset->CORE_VENDOR_SPEC3);
+
+		/* Select the default clock (free running MCLK) */
+		writel_relaxed(((readl_relaxed(host->ioaddr +
+					msm_host_offset->CORE_VENDOR_SPEC)
+					& ~CORE_HC_MCLK_SEL_MASK)
+					| CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
+					msm_host_offset->CORE_VENDOR_SPEC);
+
+		/*
+		 * Disable HC_SELECT_IN to be able to use the UHS mode select
+		 * configuration from Host Control2 register for all other
+		 * modes.
+		 *
+		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
+		 * in VENDOR_SPEC_FUNC
+		 */
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC)
+				& ~CORE_HC_SELECT_IN_EN
+				& ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC);
+	}
+	/*
+	 * SDHC has core_mem and hc_mem device memory and these memory
+	 * addresses do not fall within 1KB region. Hence, any update to
+	 * core_mem address space would require an mb() to ensure this gets
+	 * completed before its next update to registers within hc_mem.
+	 */
+	mb();
+
+	if (sup_clock != msm_host->clk_rate) {
+		pr_debug("%s: %s: setting clk rate to %u\n",
+				mmc_hostname(host->mmc), __func__, sup_clock);
+		rc = clk_set_rate(msm_host->clk, sup_clock);
+		if (rc) {
+			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
+					mmc_hostname(host->mmc), __func__,
+					sup_clock, rc);
+			goto out;
+		}
+		msm_host->clk_rate = sup_clock;
+		host->clock = clock;
+
+		if (!IS_ERR(msm_host->bus_aggr_clk) &&
+				msm_host->pdata->bus_clk_cnt) {
+			bus_clk_rate = sdhci_msm_get_bus_aggr_clk_rate(host,
+					sup_clock);
+			if (bus_clk_rate >= 0) {
+				rc = clk_set_rate(msm_host->bus_aggr_clk,
+						bus_clk_rate);
+				if (rc) {
+					pr_err("%s: %s: Failed to set rate %ld for bus-aggr-clk : %d\n",
+						mmc_hostname(host->mmc),
+						__func__, bus_clk_rate, rc);
+					goto out;
+				}
+			} else {
+				pr_err("%s: %s: Unsupported apps clk rate %u for bus-aggr-clk, err: %ld\n",
+					mmc_hostname(host->mmc), __func__,
+					sup_clock, bus_clk_rate);
+			}
+		}
+
+		/*
+		 * Configure the pinctrl drive type according to the
+		 * current clock rate.
+		 */
+		rc = sdhci_msm_config_pinctrl_drv_type(msm_host->pdata, clock);
+		if (rc)
+			pr_err("%s: %s: Failed to set pinctrl drive type for clock rate %u (%d)\n",
+					mmc_hostname(host->mmc), __func__,
+					clock, rc);
+
+		/*
+		 * Update the bus vote in case of frequency change due to
+		 * clock scaling.
+		 */
+		sdhci_msm_bus_voting(host, 1);
+	}
 out:
-	__sdhci_msm_set_clock(host, clock);
+	sdhci_set_clock(host, clock);
+}
+
+static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
+					unsigned int uhs)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	u16 ctrl_2;
+
+	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+	/* Select Bus Speed Mode for host */
+	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+	if ((uhs == MMC_TIMING_MMC_HS400) ||
+		(uhs == MMC_TIMING_MMC_HS200) ||
+		(uhs == MMC_TIMING_UHS_SDR104))
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+	else if (uhs == MMC_TIMING_UHS_SDR12)
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+	else if (uhs == MMC_TIMING_UHS_SDR25)
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+	else if (uhs == MMC_TIMING_UHS_SDR50)
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+	else if ((uhs == MMC_TIMING_UHS_DDR50) ||
+		 (uhs == MMC_TIMING_MMC_DDR52))
+		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+	/*
+	 * When the clock frequency is less than 100 MHz, the feedback clock
+	 * must be provided and the DLL must not be used so that tuning can
+	 * be skipped. To provide the feedback clock, the mode selection can
+	 * be any value less than 3'b011 in bits [2:0] of the HOST CONTROL2
+	 * register.
+	 */
+	if (host->clock <= CORE_FREQ_100MHZ) {
+		if ((uhs == MMC_TIMING_MMC_HS400) ||
+		    (uhs == MMC_TIMING_MMC_HS200) ||
+		    (uhs == MMC_TIMING_UHS_SDR104))
+			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+
+		/*
+		 * Make sure DLL is disabled when not required
+		 *
+		 * Write 1 to DLL_RST bit of DLL_CONFIG register
+		 */
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG)
+				| CORE_DLL_RST), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG);
+
+		/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
+		writel_relaxed((readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG)
+				| CORE_DLL_PDN), host->ioaddr +
+				msm_host_offset->CORE_DLL_CONFIG);
+		/*
+		 * SDHC has core_mem and hc_mem device memory and these memory
+		 * addresses do not fall within 1KB region. Hence, any update to
+		 * core_mem address space would require an mb() to ensure this
+		 * gets completed before its next update to registers within
+		 * hc_mem.
+		 */
+		mb();
+
+		/*
+		 * The DLL needs to be restored and CDCLP533 recalibrated
+		 * when the clock frequency is set back to 400MHz.
+		 */
+		msm_host->calibration_done = false;
+	}
+
+	pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
+		mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
+	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+#define MAX_TEST_BUS 60
+
+static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data;
+
+	memcpy(&cached_data->copy_mmc, msm_host->mmc,
+		sizeof(struct mmc_host));
+	if (msm_host->mmc->card)
+		memcpy(&cached_data->copy_card, msm_host->mmc->card,
+			sizeof(struct mmc_card));
+	memcpy(&cached_data->copy_host, host,
+		sizeof(struct sdhci_host));
+}
+
+void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+	int tbsel, tbsel2;
+	int i, index = 0;
+	u32 test_bus_val = 0;
+	u32 debug_reg[MAX_TEST_BUS] = {0};
+	u32 sts = 0;
+
+	sdhci_msm_cache_debug_data(host);
+	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
+
+	MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_MCI_DATA_CNT),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_MCI_FIFO_CNT));
+	pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_MCI_DATA_CNT),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_MCI_FIFO_CNT),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_MCI_STATUS));
+	pr_info("DLL cfg:  0x%08x | DLL sts:  0x%08x | SDCC ver: 0x%08x\n",
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG),
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_STATUS),
+		sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_MCI_VERSION));
+	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC),
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
+	pr_info("Vndr func2: 0x%08x | dll_config_2: 0x%08x\n",
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_FUNC2),
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG_2));
+	pr_info("dll_config_3: 0x%08x | ddr_config: 0x%08x |  dll_usr_ctl: 0x%08x\n",
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_CONFIG_3),
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DDR_CONFIG),
+		readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_DLL_USR_CTL));
+	/*
+	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
+	 * of CORE_TESTBUS_CONFIG register.
+	 *
+	 * To select test bus 0 to 7 use tbsel and to select any test bus
+	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For example,
+	 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
+	 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
+	 */
+	for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
+		for (tbsel = 0; tbsel < 8; tbsel++) {
+			if (index >= MAX_TEST_BUS)
+				break;
+			test_bus_val =
+			(tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
+				tbsel | msm_host_offset->CORE_TESTBUS_ENA;
+			sdhci_msm_writel_relaxed(test_bus_val, host,
+				msm_host_offset->CORE_TESTBUS_CONFIG);
+			debug_reg[index++] = sdhci_msm_readl_relaxed(host,
+				msm_host_offset->CORE_SDCC_DEBUG_REG);
+		}
+	}
+	for (i = 0; i < MAX_TEST_BUS; i = i + 4)
+		pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				i, i + 3, debug_reg[i], debug_reg[i+1],
+				debug_reg[i+2], debug_reg[i+3]);
+	if (host->is_crypto_en) {
+		sdhci_msm_ice_get_status(host, &sts);
+		pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
+		sdhci_msm_ice_print_regs(host);
+	}
+}
+
+static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	/* Set ICE core to be reset in sync with SDHC core */
+	if (msm_host->ice.pdev) {
+		if (msm_host->ice_hci_support)
+			writel_relaxed(1, host->ioaddr +
+						HC_VENDOR_SPECIFIC_ICE_CTRL);
+		else
+			writel_relaxed(1,
+				host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
+	}
+
+	sdhci_reset(host, mask);
 }
 
 /*
- * Platform specific register write functions. This is so that, if any
- * register write needs to be followed up by platform specific actions,
- * they can be added here. These functions can go to sleep when writes
- * to certain registers are done.
- * These functions are relying on sdhci_set_ios not using spinlock.
+ * sdhci_msm_enhanced_strobe_mask :-
+ * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
+ * SW should write 3 to
+ * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
+ * The default reset value of this register is 2.
  */
-static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
+static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
-	u32 req_type = 0;
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
 
-	switch (reg) {
-	case SDHCI_HOST_CONTROL2:
-		req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
-			REQ_IO_HIGH;
-		break;
-	case SDHCI_SOFTWARE_RESET:
-		if (host->pwr && (val & SDHCI_RESET_ALL))
-			req_type = REQ_BUS_OFF;
-		break;
-	case SDHCI_POWER_CONTROL:
-		req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
-		break;
+	if (!msm_host->enhanced_strobe ||
+			!mmc_card_strobe(msm_host->mmc->card)) {
+		pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
+				mmc_hostname(host->mmc));
+		return;
 	}
 
-	if (req_type) {
-		msm_host->pwr_irq_flag = 0;
-		/*
-		 * Since this register write may trigger a power irq, ensure
-		 * all previous register writes are complete by this point.
-		 */
-		mb();
+	if (set) {
+		writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC3)
+			| CORE_CMDEN_HS400_INPUT_MASK_CNT),
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
+	} else {
+		writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC3)
+			& ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
 	}
-	return req_type;
 }
 
-/* This function may sleep*/
-static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
+static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
 {
-	u32 req_type = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
 
-	req_type = __sdhci_msm_check_write(host, val, reg);
-	writew_relaxed(val, host->ioaddr + reg);
+	if (set) {
+		sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
+			host, msm_host_offset->CORE_TESTBUS_CONFIG);
+	} else {
+		u32 value;
 
-	if (req_type)
-		sdhci_msm_check_power_status(host, req_type);
+		value = sdhci_msm_readl_relaxed(host,
+			msm_host_offset->CORE_TESTBUS_CONFIG);
+		value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
+		sdhci_msm_writel_relaxed(value, host,
+			msm_host_offset->CORE_TESTBUS_CONFIG);
+	}
 }
 
-/* This function may sleep*/
-static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
+void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
 {
-	u32 req_type = 0;
+	u32 vendor_func2;
+	unsigned long timeout;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
 
-	req_type = __sdhci_msm_check_write(host, val, reg);
+	vendor_func2 = readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
 
-	writeb_relaxed(val, host->ioaddr + reg);
-
-	if (req_type)
-		sdhci_msm_check_power_status(host, req_type);
+	if (enable) {
+		writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+		timeout = 10000;
+		while (readl_relaxed(host->ioaddr +
+		msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
+			if (timeout == 0) {
+				pr_info("%s: Applying wait idle disable workaround\n",
+					mmc_hostname(host->mmc));
+				/*
+				 * Apply the reset workaround to not wait for
+				 * pending data transfers on AXI before
+				 * resetting the controller. This could be
+				 * risky if the transfers were stuck on the
+				 * AXI bus.
+				 */
+				vendor_func2 = readl_relaxed(host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+				writel_relaxed(vendor_func2 |
+				HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
+				msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+				host->reset_wa_t = ktime_get();
+				return;
+			}
+			timeout--;
+			udelay(10);
+		}
+		pr_info("%s: wait for SW_RST_REQ completed successfully\n",
+				mmc_hostname(host->mmc));
+	} else {
+		writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+	}
 }
 
-static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
+static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
 {
-	struct mmc_host *mmc = msm_host->mmc;
-	struct regulator *supply = mmc->supply.vqmmc;
-	u32 caps = 0, config;
-	struct sdhci_host *host = mmc_priv(mmc);
-	const struct sdhci_msm_offset *msm_offset = msm_host->offset;
+	struct sdhci_msm_pm_qos_irq *pm_qos_irq =
+		container_of(work, struct sdhci_msm_pm_qos_irq,
+			     unvote_work.work);
 
-	if (!IS_ERR(mmc->supply.vqmmc)) {
-		if (regulator_is_supported_voltage(supply, 1700000, 1950000))
-			caps |= CORE_1_8V_SUPPORT;
-		if (regulator_is_supported_voltage(supply, 2700000, 3600000))
-			caps |= CORE_3_0V_SUPPORT;
+	if (atomic_read(&pm_qos_irq->counter))
+		return;
 
-		if (!caps)
-			pr_warn("%s: 1.8/3V not supported for vqmmc\n",
-					mmc_hostname(mmc));
+	pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
+	pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
+}
+
+void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_pm_qos_latency *latency =
+		&msm_host->pdata->pm_qos_data.irq_latency;
+	int counter;
+
+	if (!msm_host->pm_qos_irq.enabled)
+		return;
+
+	counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
+	/* Make sure to update the voting in case power policy has changed */
+	if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
+		&& counter > 1)
+		return;
+
+	cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
+	msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
+	pm_qos_update_request(&msm_host->pm_qos_irq.req,
+				msm_host->pm_qos_irq.latency);
+}
+
+void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int counter;
+
+	if (!msm_host->pm_qos_irq.enabled)
+		return;
+
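+	/*
+	 * Drop one vote; only when the last vote is gone is the latency
+	 * request relaxed, either inline or via the delayed unvote work.
+	 */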
+	if (atomic_read(&msm_host->pm_qos_irq.counter)) {
+		counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
+	} else {
+		WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
+		return;
 	}
 
-	if (caps) {
-		/*
-		 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH
-		 * bit can be used as required later on.
-		 */
-		u32 io_level = msm_host->curr_io_level;
+	if (counter)
+		return;
 
-		config = readl_relaxed(host->ioaddr +
-				msm_offset->core_vendor_spec);
-		config |= CORE_IO_PAD_PWR_SWITCH_EN;
-
-		if ((io_level & REQ_IO_HIGH) && (caps &	CORE_3_0V_SUPPORT))
-			config &= ~CORE_IO_PAD_PWR_SWITCH;
-		else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
-			config |= CORE_IO_PAD_PWR_SWITCH;
-
-		writel_relaxed(config,
-				host->ioaddr + msm_offset->core_vendor_spec);
+	if (async) {
+		queue_delayed_work(msm_host->pm_qos_wq,
+				&msm_host->pm_qos_irq.unvote_work,
+				msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
+		return;
 	}
-	msm_host->caps_0 |= caps;
-	pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
+
+	msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
+	pm_qos_update_request(&msm_host->pm_qos_irq.req,
+			msm_host->pm_qos_irq.latency);
 }
 
-static const struct sdhci_msm_variant_ops mci_var_ops = {
-	.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
-	.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
-};
+static ssize_t
+sdhci_msm_pm_qos_irq_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
 
-static const struct sdhci_msm_variant_ops v5_var_ops = {
-	.msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
-	.msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
-};
+	return snprintf(buf, PAGE_SIZE,
+		"IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
+		irq->enabled, atomic_read(&irq->counter), irq->latency);
+}
 
-static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
-	.mci_removed = false,
-	.var_ops = &mci_var_ops,
-	.offset = &sdhci_msm_mci_offset,
-};
+static ssize_t
+sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
 
-static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
-	.mci_removed = true,
-	.var_ops = &v5_var_ops,
-	.offset = &sdhci_msm_v5_offset,
-};
+	return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
+}
 
-static const struct of_device_id sdhci_msm_dt_match[] = {
-	{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
-	{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
-	{},
-};
+static ssize_t
+sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	uint32_t value;
+	bool enable;
+	int ret;
 
-MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+	ret = kstrtou32(buf, 0, &value);
+	if (ret)
+		goto out;
+	enable = !!value;
 
-static const struct sdhci_ops sdhci_msm_ops = {
-	.reset = sdhci_reset,
+	if (enable == msm_host->pm_qos_irq.enabled)
+		goto out;
+
+	msm_host->pm_qos_irq.enabled = enable;
+	if (!enable) {
+		cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
+		atomic_set(&msm_host->pm_qos_irq.counter, 0);
+		msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
+		pm_qos_update_request(&msm_host->pm_qos_irq.req,
+				msm_host->pm_qos_irq.latency);
+	}
+
+out:
+	return count;
+}
+
+#ifdef CONFIG_SMP
+static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
+				struct sdhci_host *host)
+{
+	msm_host->pm_qos_irq.req.irq = host->irq;
+}
+#else
+static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
+				struct sdhci_host *host) { }
+#endif
+
+static bool sdhci_msm_pm_qos_wq_init(struct sdhci_msm_host *msm_host)
+{
+	char *wq = NULL;
+	bool ret = true;
+
+	wq = kasprintf(GFP_KERNEL, "sdhci_msm_pm_qos/%s",
+			dev_name(&msm_host->pdev->dev));
+	if (!wq)
+		return false;
+	/*
+	 * Create a work queue with the WQ_MEM_RECLAIM flag set for the
+	 * pm_qos unvote work. Because the mmc thread is created with the
+	 * PF_MEMALLOC flag set, the kernel checks for WQ_MEM_RECLAIM when
+	 * flushing the work queue; if the flag is not set, a kernel
+	 * warning is triggered.
+	 */
+	msm_host->pm_qos_wq = create_workqueue(wq);
+	if (!msm_host->pm_qos_wq) {
+		ret = false;
+		dev_err(&msm_host->pdev->dev,
+				"failed to create pm qos unvote work queue\n");
+	}
+	kfree(wq);
+	return ret;
+}
+
+void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_pm_qos_latency *irq_latency;
+	int ret;
+
+	if (!msm_host->pdata->pm_qos_data.irq_valid)
+		return;
+
+	/* Initialize only once as this gets called per partition */
+	if (msm_host->pm_qos_irq.enabled)
+		return;
+
+	atomic_set(&msm_host->pm_qos_irq.counter, 0);
+	msm_host->pm_qos_irq.req.type =
+			msm_host->pdata->pm_qos_data.irq_req_type;
+	if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
+		(msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
+		set_affine_irq(msm_host, host);
+	else
+		cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
+			cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
+
+	sdhci_msm_pm_qos_wq_init(msm_host);
+
+	INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
+		sdhci_msm_pm_qos_irq_unvote_work);
+	/* For initialization phase, set the performance latency */
+	irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
+	msm_host->pm_qos_irq.latency =
+		irq_latency->latency[SDHCI_PERFORMANCE_MODE];
+	pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
+			msm_host->pm_qos_irq.latency);
+	msm_host->pm_qos_irq.enabled = true;
+
+	/* sysfs */
+	msm_host->pm_qos_irq.enable_attr.show =
+		sdhci_msm_pm_qos_irq_enable_show;
+	msm_host->pm_qos_irq.enable_attr.store =
+		sdhci_msm_pm_qos_irq_enable_store;
+	sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
+	msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
+	msm_host->pm_qos_irq.enable_attr.attr.mode = 0644;
+	ret = device_create_file(&msm_host->pdev->dev,
+		&msm_host->pm_qos_irq.enable_attr);
+	if (ret)
+		pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
+			__func__, ret);
+
+	msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
+	msm_host->pm_qos_irq.status_attr.store = NULL;
+	sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
+	msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
+	msm_host->pm_qos_irq.status_attr.attr.mode = 0444;
+	ret = device_create_file(&msm_host->pdev->dev,
+			&msm_host->pm_qos_irq.status_attr);
+	if (ret)
+		pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
+			__func__, ret);
+}
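+
+/*
+ * The attributes created above appear under the host's platform device in
+ * sysfs, e.g. (the path shown is illustrative and depends on the DT node
+ * name):
+ *
+ *   cat /sys/devices/platform/soc/7c4000.sdhci/pm_qos_irq_status
+ *   echo 0 > /sys/devices/platform/soc/7c4000.sdhci/pm_qos_irq_enable
+ *
+ * Writing 0 to pm_qos_irq_enable cancels pending unvote work, resets the
+ * counter and relaxes the latency request back to PM_QOS_DEFAULT_VALUE.
+ */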
+
+static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_pm_qos_group *group;
+	int i;
+	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+	int offset = 0;
+
+	for (i = 0; i < nr_groups; i++) {
+		group = &msm_host->pm_qos[i];
+		offset += snprintf(&buf[offset], PAGE_SIZE - offset,
+			"Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
+			i, group->req.cpus_affine.bits[0],
+			msm_host->pm_qos_group_enable,
+			atomic_read(&group->counter),
+			group->latency);
+	}
+
+	return offset;
+}
+
+static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+		msm_host->pm_qos_group_enable ? "enabled" : "disabled");
+}
+
+static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+	uint32_t value;
+	bool enable;
+	int ret;
+	int i;
+
+	ret = kstrtou32(buf, 0, &value);
+	if (ret)
+		goto out;
+	enable = !!value;
+
+	if (enable == msm_host->pm_qos_group_enable)
+		goto out;
+
+	msm_host->pm_qos_group_enable = enable;
+	if (!enable) {
+		for (i = 0; i < nr_groups; i++) {
+			cancel_delayed_work_sync(
+				&msm_host->pm_qos[i].unvote_work);
+			atomic_set(&msm_host->pm_qos[i].counter, 0);
+			msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
+			pm_qos_update_request(&msm_host->pm_qos[i].req,
+				msm_host->pm_qos[i].latency);
+		}
+	}
+
+out:
+	return count;
+}
+
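+/*
+ * Map a CPU number to the index of the first DT-defined cpu group whose
+ * mask contains it; -EINVAL means the CPU belongs to no group, in which
+ * case no group vote is cast for it.
+ */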
+static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
+{
+	int i;
+	struct sdhci_msm_cpu_group_map *map =
+			&msm_host->pdata->pm_qos_data.cpu_group_map;
+
+	if (cpu < 0)
+		goto not_found;
+
+	for (i = 0; i < map->nr_groups; i++)
+		if (cpumask_test_cpu(cpu, &map->mask[i]))
+			return i;
+
+not_found:
+	return -EINVAL;
+}
+
+void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
+		struct sdhci_msm_pm_qos_latency *latency, int cpu)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int group = sdhci_msm_get_cpu_group(msm_host, cpu);
+	struct sdhci_msm_pm_qos_group *pm_qos_group;
+	int counter;
+
+	if (!msm_host->pm_qos_group_enable || group < 0)
+		return;
+
+	pm_qos_group = &msm_host->pm_qos[group];
+	counter = atomic_inc_return(&pm_qos_group->counter);
+
+	/* Make sure to update the voting in case power policy has changed */
+	if (pm_qos_group->latency == latency->latency[host->power_policy]
+		&& counter > 1)
+		return;
+
+	cancel_delayed_work_sync(&pm_qos_group->unvote_work);
+
+	pm_qos_group->latency = latency->latency[host->power_policy];
+	pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
+}
+
+static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
+{
+	struct sdhci_msm_pm_qos_group *group =
+		container_of(work, struct sdhci_msm_pm_qos_group,
+			     unvote_work.work);
+
+	if (atomic_read(&group->counter))
+		return;
+
+	group->latency = PM_QOS_DEFAULT_VALUE;
+	pm_qos_update_request(&group->req, group->latency);
+}
+
+bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int group = sdhci_msm_get_cpu_group(msm_host, cpu);
+
+	if (!msm_host->pm_qos_group_enable || group < 0 ||
+		atomic_dec_return(&msm_host->pm_qos[group].counter))
+		return false;
+
+	if (async) {
+		queue_delayed_work(msm_host->pm_qos_wq,
+				&msm_host->pm_qos[group].unvote_work,
+				msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
+		return true;
+	}
+
+	msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
+	pm_qos_update_request(&msm_host->pm_qos[group].req,
+				msm_host->pm_qos[group].latency);
+	return true;
+}
+
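+/*
+ * One pm_qos request is added per DT-defined cpu group. Each request
+ * starts relaxed at PM_QOS_DEFAULT_VALUE and is only tightened once the
+ * first vote arrives from the request path.
+ */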
+void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
+		struct sdhci_msm_pm_qos_latency *latency)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+	struct sdhci_msm_pm_qos_group *group;
+	int i;
+	int ret;
+
+	if (msm_host->pm_qos_group_enable)
+		return;
+
+	msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
+			GFP_KERNEL);
+	if (!msm_host->pm_qos)
+		return;
+
+	for (i = 0; i < nr_groups; i++) {
+		group = &msm_host->pm_qos[i];
+		INIT_DELAYED_WORK(&group->unvote_work,
+			sdhci_msm_pm_qos_cpu_unvote_work);
+		atomic_set(&group->counter, 0);
+		group->req.type = PM_QOS_REQ_AFFINE_CORES;
+		cpumask_copy(&group->req.cpus_affine,
+			&msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
+		/* We set default latency here for all pm_qos cpu groups. */
+		group->latency = PM_QOS_DEFAULT_VALUE;
+		pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
+			group->latency);
+		pr_info("%s: added request for group #%d (mask=0x%lx) latency=%d\n",
+			__func__, i,
+			group->req.cpus_affine.bits[0],
+			group->latency);
+	}
+	msm_host->pm_qos_prev_cpu = -1;
+	msm_host->pm_qos_group_enable = true;
+
+	/* sysfs */
+	msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
+	msm_host->pm_qos_group_status_attr.store = NULL;
+	sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
+	msm_host->pm_qos_group_status_attr.attr.name =
+			"pm_qos_cpu_groups_status";
+	msm_host->pm_qos_group_status_attr.attr.mode = 0444;
+	ret = device_create_file(&msm_host->pdev->dev,
+			&msm_host->pm_qos_group_status_attr);
+	if (ret)
+		dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
+			__func__, ret);
+	msm_host->pm_qos_group_enable_attr.show =
+			sdhci_msm_pm_qos_group_enable_show;
+	msm_host->pm_qos_group_enable_attr.store =
+			sdhci_msm_pm_qos_group_enable_store;
+	sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
+	msm_host->pm_qos_group_enable_attr.attr.name =
+			"pm_qos_cpu_groups_enable";
+	msm_host->pm_qos_group_enable_attr.attr.mode = 0644;
+	ret = device_create_file(&msm_host->pdev->dev,
+			&msm_host->pm_qos_group_enable_attr);
+	if (ret)
+		dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
+			__func__, ret);
+}
+
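+/*
+ * pre_req votes on the group of the CPU issuing the request. If the issuer
+ * has migrated to a different group since the previous request, the stale
+ * group is unvoted first, so at most one cpu-group vote is held per host
+ * between pre_req and post_req.
+ */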
+static void sdhci_msm_pre_req(struct sdhci_host *host,
+		struct mmc_request *mmc_req)
+{
+	int cpu;
+	int group;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int prev_group = sdhci_msm_get_cpu_group(msm_host,
+			msm_host->pm_qos_prev_cpu);
+
+	sdhci_msm_pm_qos_irq_vote(host);
+
+	cpu = get_cpu();
+	put_cpu();
+	group = sdhci_msm_get_cpu_group(msm_host, cpu);
+	if (group < 0)
+		return;
+
+	if (group != prev_group && prev_group >= 0) {
+		sdhci_msm_pm_qos_cpu_unvote(host,
+				msm_host->pm_qos_prev_cpu, false);
+		prev_group = -1; /* make sure to vote for new group */
+	}
+
+	if (prev_group < 0) {
+		sdhci_msm_pm_qos_cpu_vote(host,
+				msm_host->pdata->pm_qos_data.latency, cpu);
+		msm_host->pm_qos_prev_cpu = cpu;
+	}
+}
+
+static void sdhci_msm_post_req(struct sdhci_host *host,
+				struct mmc_request *mmc_req)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	sdhci_msm_pm_qos_irq_unvote(host, false);
+
+	if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
+		msm_host->pm_qos_prev_cpu = -1;
+}
+
+static void sdhci_msm_init(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	sdhci_msm_pm_qos_irq_init(host);
+
+	if (msm_host->pdata->pm_qos_data.legacy_valid)
+		sdhci_msm_pm_qos_cpu_init(host,
+				msm_host->pdata->pm_qos_data.latency);
+}
+
+static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
+	u32 max_curr = 0;
+
+	if (curr_slot && curr_slot->vdd_data)
+		max_curr = curr_slot->vdd_data->hpm_uA;
+
+	return max_curr;
+}
+
+static int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret = 0;
+	u32 clk_rate = 0;
+
+	if (!IS_ERR(msm_host->ice_clk)) {
+		clk_rate = (state == MMC_LOAD_LOW) ?
+			msm_host->pdata->ice_clk_min :
+			msm_host->pdata->ice_clk_max;
+		if (msm_host->ice_clk_rate == clk_rate)
+			return 0;
+		pr_debug("%s: changing ICE clk rate to %u\n",
+				mmc_hostname(host->mmc), clk_rate);
+		ret = clk_set_rate(msm_host->ice_clk, clk_rate);
+		if (ret) {
+			pr_err("%s: ICE_CLK rate set failed (%d) for %u\n",
+				mmc_hostname(host->mmc), ret, clk_rate);
+			return ret;
+		}
+		msm_host->ice_clk_rate = clk_rate;
+	}
+	return 0;
+}
+
+static struct sdhci_ops sdhci_msm_ops = {
+	.crypto_engine_cfg = sdhci_msm_ice_cfg,
+	.crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg,
+	.crypto_engine_cfg_end = sdhci_msm_ice_cfg_end,
+	.crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
+	.crypto_engine_reset = sdhci_msm_ice_reset,
+	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
+	.check_power_status = sdhci_msm_check_power_status,
+	.platform_execute_tuning = sdhci_msm_execute_tuning,
+	.enhanced_strobe = sdhci_msm_enhanced_strobe,
+	.toggle_cdr = sdhci_msm_toggle_cdr,
+	.get_max_segments = sdhci_msm_max_segs,
 	.set_clock = sdhci_msm_set_clock,
 	.get_min_clock = sdhci_msm_get_min_clock,
 	.get_max_clock = sdhci_msm_get_max_clock,
+	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
+	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
+	.enable_controller_clock = sdhci_msm_enable_controller_clock,
 	.set_bus_width = sdhci_set_bus_width,
-	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
-	.write_w = sdhci_msm_writew,
-	.write_b = sdhci_msm_writeb,
+	.reset = sdhci_msm_reset,
+	.clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
+	.enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
+	.reset_workaround = sdhci_msm_reset_workaround,
+	.init = sdhci_msm_init,
+	.pre_req = sdhci_msm_pre_req,
+	.post_req = sdhci_msm_post_req,
+	.get_current_limit = sdhci_msm_get_current_limit,
+	.notify_load = sdhci_msm_notify_load,
 };
 
-static const struct sdhci_pltfm_data sdhci_msm_pdata = {
-	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
-		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
-		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
-	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
-	.ops = &sdhci_msm_ops,
-};
+static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
+		struct sdhci_host *host)
+{
+	u32 version, caps = 0;
+	u16 minor;
+	u8 major;
+	u32 val;
+	const struct sdhci_msm_offset *msm_host_offset =
+					msm_host->offset;
+
+	version = sdhci_msm_readl_relaxed(host,
+		msm_host_offset->CORE_MCI_VERSION);
+	major = (version & CORE_VERSION_MAJOR_MASK) >>
+			CORE_VERSION_MAJOR_SHIFT;
+	minor = version & CORE_VERSION_TARGET_MASK;
+
+	caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
+
+	/*
+	 * Starting with the SDCC 5 controller (core major version = 1),
+	 * the controller no longer advertises the 3.0V, 1.8V and 8-bit
+	 * features except on some targets.
+	 */
+	if (major >= 1 && minor != 0x11 && minor != 0x12) {
+		struct sdhci_msm_reg_data *vdd_io_reg;
+		/*
+		 * Enable 1.8V support capability on controllers that
+		 * support dual voltage
+		 */
+		vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
+		if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
+			caps |= CORE_3_0V_SUPPORT;
+		if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
+			caps |= CORE_1_8V_SUPPORT;
+		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
+			caps |= CORE_8_BIT_SUPPORT;
+	}
+
+	/*
+	 * Enable one-MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e)
+	 * and on 8992 (minor 0x3e) as a reset workaround for the data-stuck
+	 * issue.
+	 */
+	if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
+		host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
+		val = readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+		writel_relaxed((val | CORE_ONE_MID_EN),
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+	}
+	/*
+	 * SDCC 5 controllers with major version 1 and minor version 0x34 or
+	 * later that support HS400 mode use the CM DLL instead of the
+	 * CDC LP 533 DLL.
+	 */
+	if ((major == 1) && (minor < 0x34))
+		msm_host->use_cdclp533 = true;
+
+	/*
+	 * SDCC 5 controllers with major version 1 and minor version 0x42 or
+	 * later require additional steps when resetting the DLL; they also
+	 * support HS400 enhanced strobe mode.
+	 */
+	if ((major == 1) && (minor >= 0x42)) {
+		msm_host->use_updated_dll_reset = true;
+		msm_host->enhanced_strobe = true;
+	}
+
+	/*
+	 * SDCC 5 controllers with major version 1 and minor version 0x42,
+	 * 0x46 or 0x49 currently use the 14lpp tech DLL, whose internal
+	 * gating cannot guarantee the MCLK timing requirement, i.e. that
+	 * when MCLK is gated off it stays off for no less than 0.5us, and
+	 * that MCLK is switched on for at least 1us before DATA starts
+	 * coming in.
+	 */
+	if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
+				(minor == 0x49) || (minor >= 0x6b)))
+		msm_host->use_14lpp_dll = true;
+
+	/* Fake 3.0V support for SDIO devices which require such a voltage */
+	if (msm_host->core_3_0v_support) {
+		caps |= CORE_3_0V_SUPPORT;
+		writel_relaxed((readl_relaxed(host->ioaddr +
+			SDHCI_CAPABILITIES) | caps), host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+	}
+
+	if ((major == 1) && (minor >= 0x49))
+		msm_host->rclk_delay_fix = true;
+	/*
+	 * Mask out 64-bit support on controllers with a 32-bit address bus so
+	 * that the smaller descriptor size is used, reducing memory
+	 * consumption.
+	 */
+	if (!msm_host->pdata->largeaddressbus)
+		caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
+
+	writel_relaxed(caps, host->ioaddr +
+		msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+	/* keep track of the value in SDHCI_CAPABILITIES */
+	msm_host->caps_0 = caps;
+
+	if ((major == 1) && (minor >= 0x6b)) {
+		msm_host->ice_hci_support = true;
+		host->cdr_support = true;
+	}
+
+	/* 7FF projects with 7nm DLL */
+	if ((major == 1) && ((minor == 0x6e) || (minor == 0x71) ||
+				(minor == 0x72)))
+		msm_host->use_7nm_dll = true;
+}
+
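+/*
+ * Match this controller against the androidboot.bootdevice= kernel
+ * command-line argument; for example, with
+ * "androidboot.bootdevice=7c4000.sdhci" only the platform device named
+ * 7c4000.sdhci (an illustrative name) is treated as the boot device.
+ */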
+static bool sdhci_msm_is_bootdevice(struct device *dev)
+{
+	if (strnstr(saved_command_line, "androidboot.bootdevice=",
+		    strlen(saved_command_line))) {
+		char search_string[50];
+
+		snprintf(search_string, ARRAY_SIZE(search_string),
+			"androidboot.bootdevice=%s", dev_name(dev));
+		return strnstr(saved_command_line, search_string,
+			       strlen(saved_command_line)) != NULL;
+	}
+
+	/*
+	 * If the "androidboot.bootdevice=" argument is not present, return
+	 * true as we don't know the boot device anyway.
+	 */
+	return true;
+}
 
 static int sdhci_msm_probe(struct platform_device *pdev)
 {
+	const struct sdhci_msm_offset *msm_host_offset;
 	struct sdhci_host *host;
 	struct sdhci_pltfm_host *pltfm_host;
 	struct sdhci_msm_host *msm_host;
-	struct resource *core_memres;
-	struct clk *clk;
-	int ret;
-	u16 host_version, core_minor;
-	u32 core_version, config;
-	u8 core_major;
-	const struct sdhci_msm_offset *msm_offset;
-	const struct sdhci_msm_variant_info *var_info;
+	struct resource *core_memres = NULL;
+	int ret = 0, dead = 0;
+	u16 host_version;
+	u32 irq_status, irq_ctl;
+	struct resource *tlmm_memres = NULL;
+	void __iomem *tlmm_mem;
+	unsigned long flags;
+	bool force_probe;
 
-	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
-	if (IS_ERR(host))
-		return PTR_ERR(host);
+	pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
+	msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
+				GFP_KERNEL);
+	if (!msm_host) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
-	host->sdma_boundary = 0;
+	if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
+		msm_host->mci_removed = true;
+		msm_host->offset = &sdhci_msm_offset_mci_removed;
+	} else {
+		msm_host->mci_removed = false;
+		msm_host->offset = &sdhci_msm_offset_mci_present;
+	}
+	msm_host_offset = msm_host->offset;
+	msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
+	host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
+	if (IS_ERR(host)) {
+		ret = PTR_ERR(host);
+		goto out_host_free;
+	}
+
 	pltfm_host = sdhci_priv(host);
-	msm_host = sdhci_pltfm_priv(pltfm_host);
+	pltfm_host->priv = msm_host;
 	msm_host->mmc = host->mmc;
 	msm_host->pdev = pdev;
 
-	ret = mmc_of_parse(host->mmc);
-	if (ret)
+	/* get the ice device vops if present */
+	ret = sdhci_msm_ice_get_dev(host);
+	if (ret == -EPROBE_DEFER) {
+		/*
+		 * The SDHCI driver might be probed before the ICE driver is.
+		 * In that case, return -EPROBE_DEFER so that SDHCI probing
+		 * is retried once the ICE device is available.
+		 */
+		dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
+			__func__, ret);
 		goto pltfm_free;
 
-	/*
-	 * Based on the compatible string, load the required msm host info from
-	 * the data associated with the version info.
-	 */
-	var_info = of_device_get_match_data(&pdev->dev);
+	} else if (ret == -ENODEV) {
+		/*
+		 * The ICE device is not enabled in the DTS file, so no
+		 * further ICE driver initialization is needed.
+		 */
+		dev_warn(&pdev->dev, "%s: ICE device is not enabled\n",
+			__func__);
+	} else if (ret) {
+		dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
+			__func__, ret);
+		goto pltfm_free;
+	}
 
-	msm_host->mci_removed = var_info->mci_removed;
-	msm_host->var_ops = var_info->var_ops;
-	msm_host->offset = var_info->offset;
+	/* Extract platform data */
+	if (pdev->dev.of_node) {
+		ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
+		if (ret <= 0) {
+			dev_err(&pdev->dev, "Failed to get slot index %d\n",
+				ret);
+			goto pltfm_free;
+		}
 
-	msm_offset = msm_host->offset;
+		/* Read property to determine if the probe is forced */
+		force_probe = of_find_property(pdev->dev.of_node,
+			"qcom,force-sdhc1-probe", NULL);
 
-	sdhci_get_of_property(pdev);
+		/* skip the probe if eMMC isn't a boot device */
+		if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)
+		    && !force_probe) {
+			ret = -ENODEV;
+			goto pltfm_free;
+		}
 
-	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+		if (disable_slots & (1 << (ret - 1))) {
+			dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
+				ret);
+			ret = -ENODEV;
+			goto pltfm_free;
+		}
+
+		if (ret <= 2)
+			sdhci_slot[ret-1] = msm_host;
+
+		msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
+							   msm_host);
+		if (!msm_host->pdata) {
+			dev_err(&pdev->dev, "DT parsing error\n");
+			ret = -EINVAL;
+			goto pltfm_free;
+		}
+	} else {
+		dev_err(&pdev->dev, "No device tree node\n");
+		ret = -ENOENT;
+		goto pltfm_free;
+	}
+
+	/* Setup Clocks */
 
 	/* Setup SDCC bus voter clock. */
-	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
-	if (!IS_ERR(msm_host->bus_clk)) {
+	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
 		/* Vote for max. clk rate for max. performance */
 		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
 		if (ret)
@@ -1711,121 +4712,191 @@
 	}
 
 	/* Setup main peripheral bus clock */
-	clk = devm_clk_get(&pdev->dev, "iface");
-	if (IS_ERR(clk)) {
-		ret = PTR_ERR(clk);
-		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
-		goto bus_clk_disable;
+	msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (!IS_ERR(msm_host->pclk)) {
+		ret = clk_prepare_enable(msm_host->pclk);
+		if (ret)
+			goto bus_clk_disable;
 	}
-	msm_host->bulk_clks[1].clk = clk;
+	atomic_set(&msm_host->controller_clock, 1);
 
-	/* Setup SDC MMC clock */
-	clk = devm_clk_get(&pdev->dev, "core");
-	if (IS_ERR(clk)) {
-		ret = PTR_ERR(clk);
-		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
-		goto bus_clk_disable;
-	}
-	msm_host->bulk_clks[0].clk = clk;
-
-	/* Vote for maximum clock rate for maximum performance */
-	ret = clk_set_rate(clk, INT_MAX);
-	if (ret)
-		dev_warn(&pdev->dev, "core clock boost failed\n");
-
-	clk = devm_clk_get(&pdev->dev, "cal");
-	if (IS_ERR(clk))
-		clk = NULL;
-	msm_host->bulk_clks[2].clk = clk;
-
-	clk = devm_clk_get(&pdev->dev, "sleep");
-	if (IS_ERR(clk))
-		clk = NULL;
-	msm_host->bulk_clks[3].clk = clk;
-
-	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
-				      msm_host->bulk_clks);
-	if (ret)
-		goto bus_clk_disable;
-
-	/*
-	 * xo clock is needed for FLL feature of cm_dll.
-	 * In case if xo clock is not mentioned in DT, warn and proceed.
-	 */
-	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
-	if (IS_ERR(msm_host->xo_clk)) {
-		ret = PTR_ERR(msm_host->xo_clk);
-		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
-	}
-
-	if (!msm_host->mci_removed) {
-		core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-		msm_host->core_mem = devm_ioremap_resource(&pdev->dev,
-				core_memres);
-
-		if (IS_ERR(msm_host->core_mem)) {
-			ret = PTR_ERR(msm_host->core_mem);
-			goto clk_disable;
+	/* Setup SDC ufs bus aggr clock */
+	msm_host->bus_aggr_clk = devm_clk_get(&pdev->dev, "bus_aggr_clk");
+	if (!IS_ERR(msm_host->bus_aggr_clk)) {
+		ret = clk_prepare_enable(msm_host->bus_aggr_clk);
+		if (ret) {
+			dev_err(&pdev->dev, "Bus aggregate clk not enabled\n");
+			goto pclk_disable;
 		}
 	}
 
-	/* Reset the vendor spec register to power on reset state */
+	if (msm_host->ice.pdev) {
+		/* Setup SDC ICE clock */
+		msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
+		if (!IS_ERR(msm_host->ice_clk)) {
+			/* ICE core has only one clock frequency for now */
+			ret = clk_set_rate(msm_host->ice_clk,
+					msm_host->pdata->ice_clk_max);
+			if (ret) {
+				dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
+					ret,
+					msm_host->pdata->ice_clk_max);
+				goto bus_aggr_clk_disable;
+			}
+			ret = clk_prepare_enable(msm_host->ice_clk);
+			if (ret)
+				goto bus_aggr_clk_disable;
+
+			msm_host->ice_clk_rate =
+				msm_host->pdata->ice_clk_max;
+		}
+	}
+
+	/* Setup SDC MMC clock */
+	msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(msm_host->clk)) {
+		ret = PTR_ERR(msm_host->clk);
+		goto bus_aggr_clk_disable;
+	}
+
+	/* Set to the minimum supported clock frequency */
+	ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
+	if (ret) {
+		dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
+		goto bus_aggr_clk_disable;
+	}
+	ret = clk_prepare_enable(msm_host->clk);
+	if (ret)
+		goto bus_aggr_clk_disable;
+
+	msm_host->clk_rate = sdhci_msm_get_min_clock(host);
+	atomic_set(&msm_host->clks_on, 1);
+
+	/* Setup CDC calibration fixed feedback clock */
+	msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
+	if (!IS_ERR(msm_host->ff_clk)) {
+		ret = clk_prepare_enable(msm_host->ff_clk);
+		if (ret)
+			goto clk_disable;
+	}
+
+	/* Setup CDC calibration sleep clock */
+	msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
+	if (!IS_ERR(msm_host->sleep_clk)) {
+		ret = clk_prepare_enable(msm_host->sleep_clk);
+		if (ret)
+			goto ff_clk_disable;
+	}
+
+	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+
+	ret = sdhci_msm_bus_register(msm_host, pdev);
+	if (ret)
+		goto sleep_clk_disable;
+
+	if (msm_host->msm_bus_vote.client_handle)
+		INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
+				  sdhci_msm_bus_work);
+	sdhci_msm_bus_voting(host, 1);
+
+	/* Setup regulators */
+	ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
+	if (ret) {
+		dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
+		goto bus_unregister;
+	}
+
+	/* Reset the core and Enable SDHC mode */
+	core_memres = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "core_mem");
+	if (!msm_host->mci_removed) {
+		if (!core_memres) {
+			dev_err(&pdev->dev, "Failed to get iomem resource\n");
+			goto vreg_deinit;
+		}
+		msm_host->core_mem = devm_ioremap(&pdev->dev,
+			core_memres->start, resource_size(core_memres));
+
+		if (!msm_host->core_mem) {
+			dev_err(&pdev->dev, "Failed to remap registers\n");
+			ret = -ENOMEM;
+			goto vreg_deinit;
+		}
+	}
+
+	tlmm_memres = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "tlmm_mem");
+	if (tlmm_memres) {
+		tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
+						resource_size(tlmm_memres));
+
+		if (!tlmm_mem) {
+			dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
+			ret = -ENOMEM;
+			goto vreg_deinit;
+		}
+		writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
+	}
+
+	/*
+	 * Reset the vendor spec register to power on reset state.
+	 */
 	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
-			host->ioaddr + msm_offset->core_vendor_spec);
+		host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+
+	/* This enables the ADMA error interrupt in case of a length mismatch */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) |
+			CORE_VNDR_SPEC_ADMA_ERR_SIZE_EN),
+			host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+
+	/*
+	 * Ensure the standard SDHCI FIFO is used by disabling the alternative FIFO
+	 */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC3) &
+			~CORE_FIFO_ALT_EN), host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC3);
 
 	if (!msm_host->mci_removed) {
 		/* Set HC_MODE_EN bit in HC_MODE register */
-		msm_host_writel(msm_host, HC_MODE_EN, host,
-				msm_offset->core_hc_mode);
-		config = msm_host_readl(msm_host, host,
-				msm_offset->core_hc_mode);
-		config |= FF_CLK_SW_RST_DIS;
-		msm_host_writel(msm_host, config, host,
-				msm_offset->core_hc_mode);
+		writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+
+		/* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
+		writel_relaxed(readl_relaxed(msm_host->core_mem +
+				CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
+				msm_host->core_mem + CORE_HC_MODE);
 	}
-
-	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
-	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
-		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
-			       SDHCI_VENDOR_VER_SHIFT));
-
-	core_version = msm_host_readl(msm_host, host,
-			msm_offset->core_mci_version);
-	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
-		      CORE_VERSION_MAJOR_SHIFT;
-	core_minor = core_version & CORE_VERSION_MINOR_MASK;
-	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
-		core_version, core_major, core_minor);
-
-	if (core_major == 1 && core_minor >= 0x42)
-		msm_host->use_14lpp_dll_reset = true;
+	sdhci_set_default_hw_caps(msm_host, host);
 
 	/*
-	 * SDCC 5 controller with major version 1, minor version 0x34 and later
-	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
+	 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
+	 * be used as required later on.
 	 */
-	if (core_major == 1 && core_minor < 0x34)
-		msm_host->use_cdclp533 = true;
-
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC) |
+			CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
+			msm_host_offset->CORE_VENDOR_SPEC);
 	/*
-	 * Support for some capabilities is not advertised by newer
-	 * controller versions and must be explicitly enabled.
+	 * CORE_SW_RST above may trigger a power irq if the previous PWRCTL
+	 * status was either BUS_ON or IO_HIGH_V. So before we enable the
+	 * power irq interrupt in the GIC (by registering the interrupt
+	 * handler), we need to ensure that any pending power irq interrupt
+	 * status is acknowledged; otherwise the handler would fire prematurely.
 	 */
-	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
-		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
-		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
-		writel_relaxed(config, host->ioaddr +
-				msm_offset->core_vendor_spec_capabilities0);
-	}
+	irq_status = sdhci_msm_readl_relaxed(host,
+		msm_host_offset->CORE_PWRCTL_STATUS);
+	sdhci_msm_writel_relaxed(irq_status, host,
+		msm_host_offset->CORE_PWRCTL_CLEAR);
+	irq_ctl = sdhci_msm_readl_relaxed(host,
+		msm_host_offset->CORE_PWRCTL_CTL);
 
-	/*
-	 * Power on reset state may trigger power irq if previous status of
-	 * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
-	 * interrupt in GIC, any pending power irq interrupt should be
-	 * acknowledged. Otherwise power irq interrupt handler would be
-	 * fired prematurely.
-	 */
-	sdhci_msm_handle_pwr_irq(host, 0);
+	if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
+		irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
+	if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
+		irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
+	sdhci_msm_writel_relaxed(irq_ctl, host,
+		msm_host_offset->CORE_PWRCTL_CTL);
 
 	/*
 	 * Ensure that the above writes are propagated before interrupt enablement
@@ -1833,58 +4904,254 @@
 	 */
 	mb();
 
-	/* Setup IRQ for handling power/voltage tasks with PMIC */
-	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
-	if (msm_host->pwr_irq < 0) {
-		dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
-			msm_host->pwr_irq);
-		ret = msm_host->pwr_irq;
-		goto clk_disable;
+	/*
+	 * The following are deviations from the SDHC spec v3.0:
+	 * 1. Card detection is handled using a separate GPIO.
+	 * 2. Bus power control is handled by interacting with the PMIC.
+	 */
+	host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+	host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
+	host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+	host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
+	host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
+	host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
+	host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
+	host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
+	host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
+	host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
+
+	if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
+		host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
+
+	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
+	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
+		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
+		  SDHCI_VENDOR_VER_SHIFT));
+	if (((host_version & SDHCI_VENDOR_VER_MASK) >>
+		SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
+		/*
+		 * Add a 40us delay in the interrupt handler when
+		 * operating at the initialization frequency (400KHz).
+		 */
+		host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
+		/*
+		 * Set Software Reset for the DAT line in the Software
+		 * Reset Register (bit 2).
+		 */
+		host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
 	}
 
-	sdhci_msm_init_pwr_irq_wait(msm_host);
-	/* Enable pwr irq interrupts */
-	msm_host_writel(msm_host, INT_MASK, host,
-		msm_offset->core_pwrctl_mask);
+	host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
 
+	/* Setup PWRCTL irq */
+	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
+	if (msm_host->pwr_irq < 0) {
+		dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
+				msm_host->pwr_irq);
+		goto vreg_deinit;
+	}
 	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
 					sdhci_msm_pwr_irq, IRQF_ONESHOT,
 					dev_name(&pdev->dev), host);
 	if (ret) {
-		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
-		goto clk_disable;
+		dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
+				msm_host->pwr_irq, ret);
+		goto vreg_deinit;
 	}
 
-	pm_runtime_get_noresume(&pdev->dev);
+	/* Enable pwr irq interrupts */
+	sdhci_msm_writel_relaxed(INT_MASK, host,
+		msm_host_offset->CORE_PWRCTL_MASK);
+
+#ifdef CONFIG_MMC_CLKGATE
+	/* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
+	msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
+#endif
+
+	/* Set host capabilities */
+	msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
+	msm_host->mmc->caps |= msm_host->pdata->caps;
+	msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
+	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+	msm_host->mmc->caps2 |= msm_host->pdata->caps2;
+	msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
+	msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
+	msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
+	msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
+	msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
+	msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
+	msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+
+	if (msm_host->pdata->nonremovable)
+		msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+
+	if (msm_host->pdata->nonhotplug)
+		msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
+
+	/* Initialize ICE if present */
+	if (msm_host->ice.pdev) {
+		ret = sdhci_msm_ice_init(host);
+		if (ret) {
+			dev_err(&pdev->dev, "%s: SDHCI ICE init failed (%d)\n",
+					mmc_hostname(host->mmc), ret);
+			ret = -EINVAL;
+			goto vreg_deinit;
+		}
+		host->is_crypto_en = true;
+		msm_host->mmc->inlinecrypt_support = true;
+		/* Packed commands cannot be encrypted/decrypted using ICE */
+		msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
+				MMC_CAP2_PACKED_WR_CONTROL);
+	}
+
+	init_completion(&msm_host->pwr_irq_completion);
+
+	if (gpio_is_valid(msm_host->pdata->status_gpio)) {
+		/*
+		 * Set up the card detect GPIO in its active configuration
+		 * before configuring it as an IRQ. Otherwise, it can be left
+		 * in an inconsistent state, resulting in a flood of
+		 * interrupts.
+		 */
+		sdhci_msm_setup_pins(msm_host->pdata, true);
+
+		/*
+		 * This delay is needed for stabilizing the card detect GPIO
+		 * line after changing the pull configs.
+		 */
+		usleep_range(10000, 10500);
+		ret = mmc_gpio_request_cd(msm_host->mmc,
+				msm_host->pdata->status_gpio, 0);
+		if (ret) {
+			dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
+					__func__, ret);
+			goto vreg_deinit;
+		}
+	}
+
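+	/*
+	 * Prefer a 64-bit DMA mask when both the controller capabilities
+	 * and the platform allow it; otherwise fall back to 32-bit.
+	 */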
+	if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
+		(dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
+		host->dma_mask = DMA_BIT_MASK(64);
+		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+		mmc_dev(host->mmc)->coherent_dma_mask  = host->dma_mask;
+	} else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
+		host->dma_mask = DMA_BIT_MASK(32);
+		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+		mmc_dev(host->mmc)->coherent_dma_mask  = host->dma_mask;
+	} else {
+		dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
+	}
+
+	msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
+							  "sdiowakeup_irq");
+	if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+		dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
+				msm_host->pdata->sdiowakeup_irq);
+		msm_host->is_sdiowakeup_enabled = true;
+		ret = request_irq(msm_host->pdata->sdiowakeup_irq,
+				  sdhci_msm_sdiowakeup_irq,
+				  IRQF_SHARED | IRQF_TRIGGER_HIGH,
+				  "sdhci-msm sdiowakeup", host);
+		if (ret) {
+			dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
+				__func__, msm_host->pdata->sdiowakeup_irq, ret);
+			msm_host->pdata->sdiowakeup_irq = -1;
+			msm_host->is_sdiowakeup_enabled = false;
+			goto vreg_deinit;
+		} else {
+			spin_lock_irqsave(&host->lock, flags);
+			sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+			msm_host->sdio_pending_processing = false;
+			spin_unlock_irqrestore(&host->lock, flags);
+		}
+	}
+
+	ret = sdhci_add_host(host);
+	if (ret) {
+		dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
+		goto vreg_deinit;
+	}
+
+	msm_host->pltfm_init_done = true;
+
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
-	pm_runtime_set_autosuspend_delay(&pdev->dev,
-					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
 	pm_runtime_use_autosuspend(&pdev->dev);
 
-	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
-	ret = sdhci_add_host(host);
+	msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
+	msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
+	sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
+	msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
+	msm_host->msm_bus_vote.max_bus_bw.attr.mode = 0644;
+	ret = device_create_file(&pdev->dev,
+			&msm_host->msm_bus_vote.max_bus_bw);
 	if (ret)
-		goto pm_runtime_disable;
-	sdhci_msm_set_regulator_caps(msm_host);
+		goto remove_host;
 
-	pm_runtime_mark_last_busy(&pdev->dev);
-	pm_runtime_put_autosuspend(&pdev->dev);
+	if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
+		msm_host->polling.show = show_polling;
+		msm_host->polling.store = store_polling;
+		sysfs_attr_init(&msm_host->polling.attr);
+		msm_host->polling.attr.name = "polling";
+		msm_host->polling.attr.mode = 0644;
+		ret = device_create_file(&pdev->dev, &msm_host->polling);
+		if (ret)
+			goto remove_max_bus_bw_file;
+	}
 
-	return 0;
+	msm_host->auto_cmd21_attr.show = show_auto_cmd21;
+	msm_host->auto_cmd21_attr.store = store_auto_cmd21;
+	sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
+	msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
+	msm_host->auto_cmd21_attr.attr.mode = 0644;
+	ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
+	if (ret) {
+		pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
+		       mmc_hostname(host->mmc), __func__, ret);
+		device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
+	}
+	if (sdhci_msm_is_bootdevice(&pdev->dev))
+		mmc_flush_detect_work(host->mmc);
 
-pm_runtime_disable:
+	/* Successful initialization */
+	goto out;
+
+remove_max_bus_bw_file:
+	device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
+remove_host:
+	dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
 	pm_runtime_disable(&pdev->dev);
-	pm_runtime_set_suspended(&pdev->dev);
-	pm_runtime_put_noidle(&pdev->dev);
+	sdhci_remove_host(host, dead);
+vreg_deinit:
+	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
+bus_unregister:
+	if (msm_host->msm_bus_vote.client_handle)
+		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+	sdhci_msm_bus_unregister(msm_host);
+sleep_clk_disable:
+	if (!IS_ERR(msm_host->sleep_clk))
+		clk_disable_unprepare(msm_host->sleep_clk);
+ff_clk_disable:
+	if (!IS_ERR(msm_host->ff_clk))
+		clk_disable_unprepare(msm_host->ff_clk);
 clk_disable:
-	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
-				   msm_host->bulk_clks);
+	if (!IS_ERR(msm_host->clk))
+		clk_disable_unprepare(msm_host->clk);
+bus_aggr_clk_disable:
+	if (!IS_ERR(msm_host->bus_aggr_clk))
+		clk_disable_unprepare(msm_host->bus_aggr_clk);
+pclk_disable:
+	if (!IS_ERR(msm_host->pclk))
+		clk_disable_unprepare(msm_host->pclk);
 bus_clk_disable:
-	if (!IS_ERR(msm_host->bus_clk))
+	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
 		clk_disable_unprepare(msm_host->bus_clk);
 pltfm_free:
 	sdhci_pltfm_free(pdev);
+out_host_free:
+	devm_kfree(&pdev->dev, msm_host);
+out:
+	pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
 	return ret;
 }
 
@@ -1892,34 +5159,151 @@
 {
 	struct sdhci_host *host = platform_get_drvdata(pdev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
+	int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+	int i;
 	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
-		    0xffffffff);
+			0xffffffff);
+
+	pr_debug("%s: %s Enter\n", dev_name(&pdev->dev), __func__);
+	if (!gpio_is_valid(msm_host->pdata->status_gpio))
+		device_remove_file(&pdev->dev, &msm_host->polling);
+
+	device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
+	device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
+	pm_runtime_disable(&pdev->dev);
+
+	if (msm_host->pm_qos_group_enable) {
+		struct sdhci_msm_pm_qos_group *group;
+
+		for (i = 0; i < nr_groups; i++)
+			cancel_delayed_work_sync(
+					&msm_host->pm_qos[i].unvote_work);
+
+		device_remove_file(&msm_host->pdev->dev,
+			&msm_host->pm_qos_group_enable_attr);
+		device_remove_file(&msm_host->pdev->dev,
+			&msm_host->pm_qos_group_status_attr);
+
+		for (i = 0; i < nr_groups; i++) {
+			group = &msm_host->pm_qos[i];
+			pm_qos_remove_request(&group->req);
+		}
+	}
+
+	if (msm_host->pm_qos_irq.enabled) {
+		cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
+		device_remove_file(&pdev->dev,
+				&msm_host->pm_qos_irq.enable_attr);
+		device_remove_file(&pdev->dev,
+				&msm_host->pm_qos_irq.status_attr);
+		pm_qos_remove_request(&msm_host->pm_qos_irq.req);
+	}
+
+	if (msm_host->pm_qos_wq)
+		destroy_workqueue(msm_host->pm_qos_wq);
 
 	sdhci_remove_host(host, dead);
 
-	pm_runtime_get_sync(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-	pm_runtime_put_noidle(&pdev->dev);
+	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
 
-	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
-				   msm_host->bulk_clks);
-	if (!IS_ERR(msm_host->bus_clk))
-		clk_disable_unprepare(msm_host->bus_clk);
+	sdhci_msm_setup_pins(pdata, true);
+	sdhci_msm_setup_pins(pdata, false);
+
+	if (msm_host->msm_bus_vote.client_handle) {
+		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+		sdhci_msm_bus_unregister(msm_host);
+	}
+
 	sdhci_pltfm_free(pdev);
+
 	return 0;
 }
 
 #ifdef CONFIG_PM
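+/*
+ * Returns 0 on success, 1 when SDIO wakeup does not apply to this host
+ * (no SDIO card, no valid wakeup irq, or wake-on-SDIO not requested by
+ * the card), or a negative errno from the irq wake configuration calls.
+ */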
+static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
+	      sdhci_is_valid_gpio_wakeup_int(msm_host) &&
+	      mmc_card_wake_sdio_irq(host->mmc))) {
+		msm_host->sdio_pending_processing = false;
+		return 1;
+	}
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (enable) {
+		/* configure DAT1 gpio if applicable */
+		if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+			msm_host->sdio_pending_processing = false;
+			ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
+			if (!ret)
+				sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
+			goto out;
+		} else {
+			pr_err("%s: sdiowakeup_irq(%d) invalid\n",
+					mmc_hostname(host->mmc),
+					msm_host->pdata->sdiowakeup_irq);
+		}
+	} else {
+		if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+			ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
+			sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+			msm_host->sdio_pending_processing = false;
+		} else {
+			pr_err("%s: sdiowakeup_irq(%d) invalid\n",
+					mmc_hostname(host->mmc),
+					msm_host->pdata->sdiowakeup_irq);
+		}
+	}
+out:
+	if (ret)
+		pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
+		       mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
+		       ret, msm_host->pdata->sdiowakeup_irq);
+	spin_unlock_irqrestore(&host->lock, flags);
+	return ret;
+}
+
+
 static int sdhci_msm_runtime_suspend(struct device *dev)
 {
 	struct sdhci_host *host = dev_get_drvdata(dev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	ktime_t start = ktime_get();
+	int ret;
 
-	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
-				   msm_host->bulk_clks);
+	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
+		goto defer_disable_host_irq;
 
+	sdhci_cfg_irq(host, false, true);
+
+defer_disable_host_irq:
+	disable_irq(msm_host->pwr_irq);
+
+	/*
+	 * Remove the vote immediately only if the clocks are off, in which
+	 * case we might have queued work to remove the vote but it may not
+	 * complete before runtime suspend or system suspend.
+	 */
+	if (!atomic_read(&msm_host->clks_on)) {
+		if (msm_host->msm_bus_vote.client_handle)
+			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+	}
+
+	if (host->is_crypto_en) {
+		ret = sdhci_msm_ice_suspend(host);
+		if (ret < 0)
+			pr_err("%s: failed to suspend crypto engine %d\n",
+					mmc_hostname(host->mmc), ret);
+	}
+	trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
 	return 0;
 }
 
@@ -1927,32 +5311,156 @@
 {
 	struct sdhci_host *host = dev_get_drvdata(dev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	ktime_t start = ktime_get();
+	int ret;
 
-	return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
-				       msm_host->bulk_clks);
+	if (host->is_crypto_en) {
+		ret = sdhci_msm_enable_controller_clock(host);
+		if (ret) {
+			pr_err("%s: Failed to enable reqd clocks\n",
+					mmc_hostname(host->mmc));
+			goto skip_ice_resume;
+		}
+		ret = sdhci_msm_ice_resume(host);
+		if (ret)
+			pr_err("%s: failed to resume crypto engine %d\n",
+					mmc_hostname(host->mmc), ret);
+	}
+skip_ice_resume:
+
+	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
+		goto defer_enable_host_irq;
+
+	sdhci_cfg_irq(host, true, true);
+
+defer_enable_host_irq:
+	enable_irq(msm_host->pwr_irq);
+
+	trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
+	return 0;
 }
-#endif
 
-static const struct dev_pm_ops sdhci_msm_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-				pm_runtime_force_resume)
-	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
-			   sdhci_msm_runtime_resume,
+static int sdhci_msm_suspend(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret = 0;
+	int sdio_cfg = 0;
+	ktime_t start = ktime_get();
+
+	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
+			 (msm_host->mmc->slot.cd_irq >= 0))
+		disable_irq(msm_host->mmc->slot.cd_irq);
+
+	if (pm_runtime_suspended(dev)) {
+		pr_debug("%s: %s: already runtime suspended\n",
+			mmc_hostname(host->mmc), __func__);
+		goto out;
+	}
+	ret = sdhci_msm_runtime_suspend(dev);
+out:
+	sdhci_msm_disable_controller_clock(host);
+	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
+		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
+		if (sdio_cfg)
+			sdhci_cfg_irq(host, false, true);
+	}
+
+	trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
+	return ret;
+}
+
+static int sdhci_msm_resume(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret = 0;
+	int sdio_cfg = 0;
+	ktime_t start = ktime_get();
+
+	if (gpio_is_valid(msm_host->pdata->status_gpio) &&
+			 (msm_host->mmc->slot.cd_irq >= 0))
+		enable_irq(msm_host->mmc->slot.cd_irq);
+
+	if (pm_runtime_suspended(dev)) {
+		pr_debug("%s: %s: runtime suspended, defer system resume\n",
+			mmc_hostname(host->mmc), __func__);
+		goto out;
+	}
+
+	ret = sdhci_msm_runtime_resume(dev);
+out:
+	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
+		sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
+		if (sdio_cfg)
+			sdhci_cfg_irq(host, true, true);
+	}
+
+	trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
+	return ret;
+}
+
+static int sdhci_msm_suspend_noirq(struct device *dev)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret = 0;
+
+	/*
+	 * ksdioirqd may still be running, so retry the
+	 * suspend if the clocks are still on.
+	 */
+	if (atomic_read(&msm_host->clks_on)) {
+		pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
+			mmc_hostname(host->mmc), __func__);
+		ret = -EAGAIN;
+	}
+
+	if (host->mmc->card && mmc_card_sdio(host->mmc->card))
+		if (msm_host->sdio_pending_processing)
+			ret = -EBUSY;
+
+	return ret;
+}
+
+static const struct dev_pm_ops sdhci_msm_pmops = {
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
+	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
 			   NULL)
+	.suspend_noirq = sdhci_msm_suspend_noirq,
 };
 
+#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
+
+#else
+#define SDHCI_MSM_PMOPS NULL
+#endif
+static const struct of_device_id sdhci_msm_dt_match[] = {
+	{.compatible = "qcom,sdhci-msm"},
+	{.compatible = "qcom,sdhci-msm-v5"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+
 static struct platform_driver sdhci_msm_driver = {
-	.probe = sdhci_msm_probe,
-	.remove = sdhci_msm_remove,
-	.driver = {
-		   .name = "sdhci_msm",
-		   .of_match_table = sdhci_msm_dt_match,
-		   .pm = &sdhci_msm_pm_ops,
+	.probe		= sdhci_msm_probe,
+	.remove		= sdhci_msm_remove,
+	.driver		= {
+		.name	= "sdhci_msm",
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+		.of_match_table = sdhci_msm_dt_match,
+		.pm	= SDHCI_MSM_PMOPS,
 	},
 };
 
 module_platform_driver(sdhci_msm_driver);
 
-MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
new file mode 100644
index 0000000..902edd4
--- /dev/null
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SDHCI_MSM_H__
+#define __SDHCI_MSM_H__
+
+#include <linux/mmc/mmc.h>
+#include <linux/pm_qos.h>
+#include "sdhci-pltfm.h"
+
+/* This structure keeps information per regulator */
+struct sdhci_msm_reg_data {
+	/* voltage regulator handle */
+	struct regulator *reg;
+	/* regulator name */
+	const char *name;
+	/* voltage level to be set */
+	u32 low_vol_level;
+	u32 high_vol_level;
+	/* Load values for low power and high power mode */
+	u32 lpm_uA;
+	u32 hpm_uA;
+
+	/* is this regulator enabled? */
+	bool is_enabled;
+	/* does this regulator need to be always on? */
+	bool is_always_on;
+	/* is low power mode setting required for this regulator? */
+	bool lpm_sup;
+	bool set_voltage_sup;
+};
+
+/*
+ * This structure keeps information for all the
+ * regulators required for a SDCC slot.
+ */
+struct sdhci_msm_slot_reg_data {
+	/* keeps VDD/VCC regulator info */
+	struct sdhci_msm_reg_data *vdd_data;
+	/* keeps VDD IO regulator info */
+	struct sdhci_msm_reg_data *vdd_io_data;
+};
+
+struct sdhci_msm_gpio {
+	u32 no;
+	const char *name;
+	bool is_enabled;
+};
+
+struct sdhci_msm_gpio_data {
+	struct sdhci_msm_gpio *gpio;
+	u8 size;
+};
+
+struct sdhci_msm_pin_data {
+	/*
+	 * = 1 if controller pins are using gpios
+	 * = 0 if controller has dedicated MSM pads
+	 */
+	u8 is_gpio;
+	struct sdhci_msm_gpio_data *gpio_data;
+};
+
+struct sdhci_pinctrl_data {
+	struct pinctrl          *pctrl;
+	struct pinctrl_state    *pins_active;
+	struct pinctrl_state    *pins_sleep;
+	struct pinctrl_state    *pins_drv_type_400KHz;
+	struct pinctrl_state    *pins_drv_type_50MHz;
+	struct pinctrl_state    *pins_drv_type_100MHz;
+	struct pinctrl_state    *pins_drv_type_200MHz;
+};
+
+struct sdhci_msm_bus_voting_data {
+	struct msm_bus_scale_pdata *bus_pdata;
+	unsigned int *bw_vecs;
+	unsigned int bw_vecs_size;
+};
+
+struct sdhci_msm_cpu_group_map {
+	int nr_groups;
+	cpumask_t *mask;
+};
+
+struct sdhci_msm_pm_qos_latency {
+	s32 latency[SDHCI_POWER_POLICY_NUM];
+};
+
+struct sdhci_msm_pm_qos_data {
+	struct sdhci_msm_cpu_group_map cpu_group_map;
+	enum pm_qos_req_type irq_req_type;
+	int irq_cpu;
+	struct sdhci_msm_pm_qos_latency irq_latency;
+	struct sdhci_msm_pm_qos_latency *cmdq_latency;
+	struct sdhci_msm_pm_qos_latency *latency;
+	bool irq_valid;
+	bool cmdq_valid;
+	bool legacy_valid;
+};
+
+/*
+ * PM QoS for group voting management - each defined CPU group is
+ * associated with one instance of this structure.
+ */
+struct sdhci_msm_pm_qos_group {
+	struct pm_qos_request req;
+	struct delayed_work unvote_work;
+	atomic_t counter;
+	s32 latency;
+};
+
+/* PM QoS HW IRQ voting */
+struct sdhci_msm_pm_qos_irq {
+	struct pm_qos_request req;
+	struct delayed_work unvote_work;
+	struct device_attribute enable_attr;
+	struct device_attribute status_attr;
+	atomic_t counter;
+	s32 latency;
+	bool enabled;
+};
+
+struct sdhci_msm_pltfm_data {
+	/* Supported UHS-I Modes */
+	u32 caps;
+
+	/* More capabilities */
+	u32 caps2;
+
+	unsigned long mmc_bus_width;
+	struct sdhci_msm_slot_reg_data *vreg_data;
+	bool nonremovable;
+	bool nonhotplug;
+	bool largeaddressbus;
+	bool pin_cfg_sts;
+	struct sdhci_msm_pin_data *pin_data;
+	struct sdhci_pinctrl_data *pctrl_data;
+	int status_gpio; /* card detection GPIO that is configured as IRQ */
+	struct sdhci_msm_bus_voting_data *voting_data;
+	u32 *sup_clk_table;
+	unsigned char sup_clk_cnt;
+	int sdiowakeup_irq;
+	u32 *sup_ice_clk_table;
+	unsigned char sup_ice_clk_cnt;
+	struct sdhci_msm_pm_qos_data pm_qos_data;
+	u32 ice_clk_max;
+	u32 ice_clk_min;
+	u32 ddr_config;
+	bool rclk_wa;
+	u32 *bus_clk_table;
+	unsigned char bus_clk_cnt;
+};
+
+struct sdhci_msm_bus_vote {
+	uint32_t client_handle;
+	uint32_t curr_vote;
+	int min_bw_vote;
+	int max_bw_vote;
+	bool is_max_bw_needed;
+	struct delayed_work vote_work;
+	struct device_attribute max_bus_bw;
+};
+
+struct sdhci_msm_ice_data {
+	struct qcom_ice_variant_ops *vops;
+	struct platform_device *pdev;
+	int state;
+};
+
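+/*
+ * Snapshot of vendor and standard controller registers, saved so the
+ * controller can be reprogrammed after it loses state (e.g. across a
+ * power collapse).
+ */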
+struct sdhci_msm_regs_restore {
+	bool is_supported;
+	bool is_valid;
+	u32 vendor_pwrctl_mask;
+	u32 vendor_pwrctl_ctl;
+	u32 vendor_caps_0;
+	u32 vendor_func;
+	u32 vendor_func2;
+	u32 vendor_func3;
+	u32 hc_2c_2e;
+	u32 hc_28_2a;
+	u32 hc_34_36;
+	u32 hc_38_3a;
+	u32 hc_3c_3e;
+	u32 hc_caps_1;
+	u32 testbus_config;
+	u32 dll_config;
+	u32 dll_config2;
+	u32 dll_config3;
+	u32 dll_usr_ctl;
+};
+
+struct sdhci_msm_debug_data {
+	struct mmc_host copy_mmc;
+	struct mmc_card copy_card;
+	struct sdhci_host copy_host;
+};
+
+struct sdhci_msm_host {
+	struct platform_device	*pdev;
+	void __iomem *core_mem;    /* MSM SDCC mapped address */
+	void __iomem *cryptoio;    /* ICE HCI mapped address */
+	bool ice_hci_support;
+	int	pwr_irq;	/* power irq */
+	struct clk	 *clk;     /* main SD/MMC bus clock */
+	struct clk	 *pclk;    /* SDHC peripheral bus clock */
+	struct clk	 *bus_aggr_clk; /* AXI clock shared with UFS */
+	struct clk	 *bus_clk; /* SDHC bus voter clock */
+	struct clk	 *ff_clk; /* CDC calibration fixed feedback clock */
+	struct clk	 *sleep_clk; /* CDC calibration sleep clock */
+	struct clk	 *ice_clk; /* SDHC peripheral ICE clock */
+	atomic_t clks_on; /* Set if clocks are enabled */
+	struct sdhci_msm_pltfm_data *pdata;
+	struct mmc_host  *mmc;
+	struct sdhci_msm_debug_data cached_data;
+	struct sdhci_pltfm_data sdhci_msm_pdata;
+	u32 curr_pwr_state;
+	u32 curr_io_level;
+	struct completion pwr_irq_completion;
+	struct sdhci_msm_bus_vote msm_bus_vote;
+	struct device_attribute	polling;
+	u32 clk_rate; /* Keeps track of current clock rate that is set */
+	bool tuning_done;
+	bool calibration_done;
+	u8 saved_tuning_phase;
+	bool en_auto_cmd21;
+	struct device_attribute auto_cmd21_attr;
+	bool is_sdiowakeup_enabled;
+	bool sdio_pending_processing;
+	atomic_t controller_clock;
+	bool use_cdclp533;
+	bool use_updated_dll_reset;
+	bool use_14lpp_dll;
+	bool enhanced_strobe;
+	bool rclk_delay_fix;
+	u32 caps_0;
+	struct sdhci_msm_ice_data ice;
+	u32 ice_clk_rate;
+	struct sdhci_msm_pm_qos_group *pm_qos;
+	int pm_qos_prev_cpu;
+	struct device_attribute pm_qos_group_enable_attr;
+	struct device_attribute pm_qos_group_status_attr;
+	bool pm_qos_group_enable;
+	struct sdhci_msm_pm_qos_irq pm_qos_irq;
+	bool tuning_in_progress;
+	bool mci_removed;
+	const struct sdhci_msm_offset *offset;
+	bool core_3_0v_support;
+	bool pltfm_init_done;
+	struct sdhci_msm_regs_restore regs_restore;
+	bool use_7nm_dll;
+	int soc_min_rev;
+	struct workqueue_struct *pm_qos_wq;
+};
+
+extern char *saved_command_line;
+
+void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host);
+void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host);
+void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async);
+
+void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
+		struct sdhci_msm_pm_qos_latency *latency);
+void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
+		struct sdhci_msm_pm_qos_latency *latency, int cpu);
+bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async);
+
+#endif /* __SDHCI_MSM_H__ */
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 02bea61..b058d79 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -121,6 +121,7 @@
 	struct resource *iomem;
 	void __iomem *ioaddr;
 	int irq, ret;
+	struct extcon_dev *extcon;
 
 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
@@ -156,6 +157,14 @@
 		host->quirks2 = pdata->quirks2;
 	}
 
+	extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
+	if (IS_ERR(extcon) && PTR_ERR(extcon) != -ENODEV) {
+		ret = PTR_ERR(extcon);
+		goto err;
+	}
+	if (!IS_ERR(extcon))
+		host->mmc->extcon = extcon;
+
 	platform_set_drvdata(pdev, host);
 
 	return host;
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 1e91fb1..6f9b49f 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -23,6 +23,7 @@
 
 struct sdhci_pltfm_host {
 	struct clk *clk;
+	void *priv; /* to handle quirks across io-accessor calls */
 
 	/* migrate from sdhci_of_host */
 	unsigned int clock;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 1b3fbd9..394fca7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -34,6 +34,8 @@
 #include <linux/mmc/card.h>
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/slot-gpio.h>
+#include <trace/events/mmc.h>
 
 #include "sdhci.h"
 
@@ -47,50 +49,85 @@
 
 #define MAX_TUNING_LOOP 40
 
+#define SDHCI_DBG_DUMP_RS_INTERVAL (10 * HZ)
+#define SDHCI_DBG_DUMP_RS_BURST 2
+
 static unsigned int debug_quirks = 0;
 static unsigned int debug_quirks2;
 
 static void sdhci_finish_data(struct sdhci_host *);
-
+static bool sdhci_check_state(struct sdhci_host *);
+static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable);
 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
 
+static void sdhci_dump_state(struct sdhci_host *host)
+{
+	struct mmc_host *mmc = host->mmc;
+
+#ifdef CONFIG_MMC_CLKGATE
+	pr_info("%s: clk: %d clk-gated: %d claimer: %s pwr: %d host->irq = %d\n",
+		mmc_hostname(mmc), host->clock, mmc->clk_gated,
+		mmc->claimer->task->comm, host->pwr,
+		(host->flags & SDHCI_HOST_IRQ_STATUS));
+#else
+	pr_info("%s: clk: %d claimer: %s pwr: %d\n",
+		mmc_hostname(mmc), host->clock,
+		mmc->claimer->task->comm, host->pwr);
+#endif
+	pr_info("%s: rpmstatus[pltfm](runtime-suspend:usage_count:disable_depth)(%d:%d:%d)\n",
+		mmc_hostname(mmc), mmc->parent->power.runtime_status,
+		atomic_read(&mmc->parent->power.usage_count),
+		mmc->parent->power.disable_depth);
+}
+
 void sdhci_dumpregs(struct sdhci_host *host)
 {
+	MMC_TRACE(host->mmc,
+		"%s: 0x04=0x%08x 0x06=0x%08x 0x0E=0x%08x 0x30=0x%08x 0x34=0x%08x 0x38=0x%08x\n",
+		__func__,
+		sdhci_readw(host, SDHCI_BLOCK_SIZE),
+		sdhci_readw(host, SDHCI_BLOCK_COUNT),
+		sdhci_readw(host, SDHCI_COMMAND),
+		sdhci_readl(host, SDHCI_INT_STATUS),
+		sdhci_readl(host, SDHCI_INT_ENABLE),
+		sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
+	mmc_stop_tracing(host->mmc);
+
 	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
 
 	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
-		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
-		   sdhci_readw(host, SDHCI_HOST_VERSION));
+	       sdhci_readl(host, SDHCI_DMA_ADDRESS),
+	       sdhci_readw(host, SDHCI_HOST_VERSION));
 	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
-		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
-		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
+	       sdhci_readw(host, SDHCI_BLOCK_SIZE),
+	       sdhci_readw(host, SDHCI_BLOCK_COUNT));
 	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
-		   sdhci_readl(host, SDHCI_ARGUMENT),
-		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
+	       sdhci_readl(host, SDHCI_ARGUMENT),
+	       sdhci_readw(host, SDHCI_TRANSFER_MODE));
 	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
-		   sdhci_readl(host, SDHCI_PRESENT_STATE),
-		   sdhci_readb(host, SDHCI_HOST_CONTROL));
+	       sdhci_readl(host, SDHCI_PRESENT_STATE),
+	       sdhci_readb(host, SDHCI_HOST_CONTROL));
 	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
-		   sdhci_readb(host, SDHCI_POWER_CONTROL),
-		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
+	       sdhci_readb(host, SDHCI_POWER_CONTROL),
+	       sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
 	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
-		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
-		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
+	       sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
+	       sdhci_readw(host, SDHCI_CLOCK_CONTROL));
 	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
-		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
-		   sdhci_readl(host, SDHCI_INT_STATUS));
+	       sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
+	       sdhci_readl(host, SDHCI_INT_STATUS));
 	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
-		   sdhci_readl(host, SDHCI_INT_ENABLE),
-		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
+	       sdhci_readl(host, SDHCI_INT_ENABLE),
+	       sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
 	SDHCI_DUMP("AC12 err:  0x%08x | Slot int: 0x%08x\n",
 		   sdhci_readw(host, SDHCI_ACMD12_ERR),
-		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
+	       sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
 	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
-		   sdhci_readl(host, SDHCI_CAPABILITIES),
-		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
+	       sdhci_readl(host, SDHCI_CAPABILITIES),
+	       sdhci_readl(host, SDHCI_CAPABILITIES_1));
 	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
-		   sdhci_readw(host, SDHCI_COMMAND),
-		   sdhci_readl(host, SDHCI_MAX_CURRENT));
+	       sdhci_readw(host, SDHCI_COMMAND),
+	       sdhci_readl(host, SDHCI_MAX_CURRENT));
 	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
 		   sdhci_readl(host, SDHCI_RESPONSE),
 		   sdhci_readl(host, SDHCI_RESPONSE + 4));
@@ -98,7 +135,7 @@
 		   sdhci_readl(host, SDHCI_RESPONSE + 8),
 		   sdhci_readl(host, SDHCI_RESPONSE + 12));
 	SDHCI_DUMP("Host ctl2: 0x%08x\n",
-		   sdhci_readw(host, SDHCI_HOST_CONTROL2));
+	       sdhci_readw(host, SDHCI_HOST_CONTROL2));
 
 	if (host->flags & SDHCI_USE_ADMA) {
 		if (host->flags & SDHCI_USE_64_BIT_DMA) {
@@ -113,7 +150,15 @@
 		}
 	}
 
+	host->mmc->err_occurred = true;
+
+	if (host->ops->dump_vendor_regs)
+		host->ops->dump_vendor_regs(host);
+	sdhci_dump_state(host);
 	SDHCI_DUMP("============================================\n");
+	/* Crash the system if the crash_on_err debugfs flag is set. */
+	if (host->mmc->crash_on_err)
+		BUG();
 }
 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
 
@@ -180,6 +225,7 @@
 {
 	ktime_t timeout;
 
+retry_reset:
 	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
 
 	if (mask & SDHCI_RESET_ALL) {
@@ -192,16 +238,56 @@
 	/* Wait max 100 ms */
 	timeout = ktime_add_ms(ktime_get(), 100);
 
+	if (host->ops->check_power_status && host->pwr &&
+	    (mask & SDHCI_RESET_ALL))
+		host->ops->check_power_status(host, REQ_BUS_OFF);
+
+	/* clear pending normal/error interrupt status */
+	sdhci_writel(host, sdhci_readl(host, SDHCI_INT_STATUS),
+			SDHCI_INT_STATUS);
+
 	/* hw clears the bit when it's done */
 	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
 		if (ktime_after(ktime_get(), timeout)) {
 			pr_err("%s: Reset 0x%x never completed.\n",
 				mmc_hostname(host->mmc), (int)mask);
+			MMC_TRACE(host->mmc, "%s: Reset 0x%x never completed\n",
+					__func__, (int)mask);
+			if ((host->quirks2 & SDHCI_QUIRK2_USE_RESET_WORKAROUND)
+				&& host->ops->reset_workaround) {
+				if (!host->reset_wa_applied) {
+					/*
+					 * apply the workaround and issue
+					 * reset again.
+					 */
+					host->ops->reset_workaround(host, 1);
+					host->reset_wa_applied = 1;
+					host->reset_wa_cnt++;
+					goto retry_reset;
+				} else {
+					pr_err("%s: Reset 0x%x failed with workaround\n",
+						mmc_hostname(host->mmc),
+						(int)mask);
+					/* clear the workaround */
+					host->ops->reset_workaround(host, 0);
+					host->reset_wa_applied = 0;
+				}
+			}
+
 			sdhci_dumpregs(host);
 			return;
 		}
 		udelay(10);
 	}
+
+	if ((host->quirks2 & SDHCI_QUIRK2_USE_RESET_WORKAROUND) &&
+			host->ops->reset_workaround && host->reset_wa_applied) {
+		pr_info("%s: Reset 0x%x successful with workaround\n",
+				mmc_hostname(host->mmc), (int)mask);
+		/* clear the workaround */
+		host->ops->reset_workaround(host, 0);
+		host->reset_wa_applied = 0;
+	}
 }
 EXPORT_SYMBOL_GPL(sdhci_reset);
 
@@ -225,6 +311,8 @@
 		/* Resetting the controller clears many */
 		host->preset_enabled = false;
 	}
+	if (host->is_crypto_en)
+		host->crypto_reset_reqd = true;
 }
 
 static void sdhci_set_default_irqs(struct sdhci_host *host)
@@ -233,7 +321,7 @@
 		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
 		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
 		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
-		    SDHCI_INT_RESPONSE;
+		    SDHCI_INT_RESPONSE | SDHCI_INT_ACMD12ERR;
 
 	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
 	    host->tuning_mode == SDHCI_TUNING_MODE_3)
@@ -294,9 +382,12 @@
 	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
 	unsigned long flags;
 
+	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+		return;
+
 	spin_lock_irqsave(&host->lock, flags);
 
-	if (host->runtime_suspended)
+	if (host->runtime_suspended || sdhci_check_state(host))
 		goto out;
 
 	if (brightness == LED_OFF)
@@ -668,6 +759,9 @@
 	void *align;
 	char *buffer;
 	unsigned long flags;
+	u32 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+
+	trace_mmc_adma_table_post(command, data->sg_len);
 
 	if (data->flags & MMC_DATA_READ) {
 		bool has_unaligned = false;
@@ -777,6 +871,7 @@
 	u8 count;
 	struct mmc_data *data = cmd->data;
 	unsigned target_timeout, current_timeout;
+	u32 curr_clk = 0; /* In KHz */
 
 	*too_big = true;
 
@@ -807,7 +902,14 @@
 	 *     (1) / (2) > 2^6
 	 */
 	count = 0;
-	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
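+	/*
+	 * Example: with a 200MHz base clock and SDHCI_QUIRK2_DIVIDE_TOUT_BY_4,
+	 * curr_clk = 50000 KHz, so the starting timeout below is
+	 * (1 << 13) * 1000 / 50000 ~= 163 us before any doubling.
+	 */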
+	if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK) {
+		curr_clk = host->clock / 1000;
+		if (host->quirks2 & SDHCI_QUIRK2_DIVIDE_TOUT_BY_4)
+			curr_clk /= 4;
+		current_timeout = (1 << 13) * 1000 / curr_clk;
+	} else {
+		current_timeout = (1 << 13) * 1000 / host->timeout_clk;
+	}
 	while (current_timeout < target_timeout) {
 		count++;
 		current_timeout <<= 1;
@@ -816,7 +918,8 @@
 	}
 
 	if (count >= 0xF) {
-		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
+		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) ||
+		    !(host->quirks2 & SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT))
 			DBG("Too large timeout 0x%x requested for CMD%d!\n",
 			    count, cmd->opcode);
 		count = 0xE;
@@ -874,6 +977,17 @@
 	}
 }
 
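+/*
+ * Program the Block Size register. The SDMA buffer boundary field is only
+ * used for SDMA transfers, so it is programmed as 0 when ADMA is in use.
+ */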
+static void sdhci_set_blk_size_reg(struct sdhci_host *host, unsigned int blksz,
+				   unsigned int sdma_boundary)
+{
+	if (host->flags & SDHCI_USE_ADMA)
+		sdhci_writew(host, SDHCI_MAKE_BLKSZ(0, blksz),
+			     SDHCI_BLOCK_SIZE);
+	else
+		sdhci_writew(host, SDHCI_MAKE_BLKSZ(sdma_boundary, blksz),
+			     SDHCI_BLOCK_SIZE);
+}
+
 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
 {
 	u8 ctrl;
@@ -890,7 +1004,7 @@
 	WARN_ON(host->data);
 
 	/* Sanity checks */
-	BUG_ON(data->blksz * data->blocks > 524288);
+	BUG_ON(data->blksz * data->blocks > host->mmc->max_req_size);
 	BUG_ON(data->blksz > host->mmc->max_blk_size);
 	BUG_ON(data->blocks > 65535);
 
@@ -905,6 +1019,10 @@
 
 		host->flags |= SDHCI_REQ_USE_DMA;
 
+		if ((host->quirks2 & SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING) &&
+			cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
+			host->flags &= ~SDHCI_REQ_USE_DMA;
+
 		/*
 		 * FIXME: This doesn't account for merging when mapping the
 		 * scatterlist.
@@ -959,6 +1077,7 @@
 			WARN_ON(1);
 			host->flags &= ~SDHCI_REQ_USE_DMA;
 		} else if (host->flags & SDHCI_USE_ADMA) {
+			trace_mmc_adma_table_pre(cmd->opcode, data->sg_len);
 			sdhci_adma_table_pre(host, data, sg_cnt);
 
 			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
@@ -1006,11 +1125,14 @@
 	}
 
 	sdhci_set_transfer_irqs(host);
-
 	/* Set the DMA boundary value and block size */
-	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
-		     SDHCI_BLOCK_SIZE);
+	sdhci_set_blk_size_reg(host, data->blksz, SDHCI_DEFAULT_BOUNDARY_ARG);
 	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+	MMC_TRACE(host->mmc,
+		"%s: 0x28=0x%08x 0x3E=0x%08x 0x06=0x%08x\n", __func__,
+		sdhci_readb(host, SDHCI_HOST_CONTROL),
+		sdhci_readw(host, SDHCI_HOST_CONTROL2),
+		sdhci_readw(host, SDHCI_BLOCK_COUNT));
 }
 
 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
@@ -1061,12 +1183,26 @@
 		}
 	}
 
-	if (data->flags & MMC_DATA_READ)
+	if (data->flags & MMC_DATA_READ) {
 		mode |= SDHCI_TRNS_READ;
+		if (host->ops->toggle_cdr) {
+			if ((cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) ||
+				(cmd->opcode == MMC_SEND_TUNING_BLOCK_HS400) ||
+				(cmd->opcode == MMC_SEND_TUNING_BLOCK))
+				host->ops->toggle_cdr(host, false);
+			else
+				host->ops->toggle_cdr(host, true);
+		}
+	}
+	if (host->ops->toggle_cdr && (data->flags & MMC_DATA_WRITE))
+		host->ops->toggle_cdr(host, false);
 	if (host->flags & SDHCI_REQ_USE_DMA)
 		mode |= SDHCI_TRNS_DMA;
 
 	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
+	MMC_TRACE(host->mmc, "%s: 0x00=0x%08x 0x0C=0x%08x\n", __func__,
+		sdhci_readw(host, SDHCI_ARGUMENT2),
+		sdhci_readw(host, SDHCI_TRANSFER_MODE));
 }
 
 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
@@ -1212,7 +1348,7 @@
 		cmd->flags |= MMC_RSP_BUSY;
 
 	/* Wait max 10 ms */
-	timeout = 10;
+	timeout = 10000;
 
 	mask = SDHCI_CMD_INHIBIT;
 	if (sdhci_data_line_cmd(cmd))
@@ -1227,13 +1363,16 @@
 		if (timeout == 0) {
 			pr_err("%s: Controller never released inhibit bit(s).\n",
 			       mmc_hostname(host->mmc));
+			MMC_TRACE(host->mmc,
+			"%s :Controller never released inhibit bit(s)\n",
+			__func__);
 			sdhci_dumpregs(host);
 			cmd->error = -EIO;
 			sdhci_finish_mrq(host, cmd->mrq);
 			return;
 		}
 		timeout--;
-		mdelay(1);
+		udelay(1);
 	}
 
 	host->cmd = cmd;
@@ -1284,7 +1423,15 @@
 		timeout += 10 * HZ;
 	sdhci_mod_timer(host, cmd->mrq, timeout);
 
+	if (cmd->data)
+		host->data_start_time = ktime_get();
+	trace_mmc_cmd_rw_start(cmd->opcode, cmd->arg, cmd->flags);
 	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
+	MMC_TRACE(host->mmc,
+		"%s: updated 0x8=0x%08x 0xC=0x%08x 0xE=0x%08x\n", __func__,
+		sdhci_readl(host, SDHCI_ARGUMENT),
+		sdhci_readw(host, SDHCI_TRANSFER_MODE),
+		sdhci_readw(host, SDHCI_COMMAND));
 }
 EXPORT_SYMBOL_GPL(sdhci_send_command);
 
@@ -1317,8 +1464,14 @@
 	if (cmd->flags & MMC_RSP_PRESENT) {
 		if (cmd->flags & MMC_RSP_136) {
 			sdhci_read_rsp_136(host, cmd);
+			MMC_TRACE(host->mmc,
+			"%s: resp 0: 0x%08x resp 1: 0x%08x resp 2: 0x%08x resp 3: 0x%08x\n",
+			__func__, cmd->resp[0], cmd->resp[1],
+			cmd->resp[2], cmd->resp[3]);
 		} else {
 			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+			MMC_TRACE(host->mmc, "%s: resp 0: 0x%08x\n",
+				__func__, cmd->resp[0]);
 		}
 	}
 
@@ -1478,6 +1631,10 @@
 clock_set:
 	if (real_div)
 		*actual_clock = (host->max_clk * clk_mul) / real_div;
+
+	if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
+		div = 0;
+
 	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
 	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
 		<< SDHCI_DIVIDER_HI_SHIFT;
@@ -1500,6 +1657,8 @@
 		if (ktime_after(ktime_get(), timeout)) {
 			pr_err("%s: Internal clock never stabilised.\n",
 			       mmc_hostname(host->mmc));
+			MMC_TRACE(host->mmc,
+			"%s: Internal clock never stabilised.\n", __func__);
 			sdhci_dumpregs(host);
 			return;
 		}
@@ -1517,7 +1676,8 @@
 
 	host->mmc->actual_clock = 0;
 
-	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+	if (host->clock)
+		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
 
 	if (clock == 0)
 		return;
@@ -1579,6 +1739,8 @@
 
 	if (pwr == 0) {
 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host, REQ_BUS_OFF);
 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
 			sdhci_runtime_pm_bus_off(host);
 	} else {
@@ -1586,20 +1748,29 @@
 		 * Spec says that we should clear the power reg before setting
 		 * a new value. Some controllers don't seem to like this though.
 		 */
-		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) {
 			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
-
+			if (host->ops->check_power_status)
+				host->ops->check_power_status(host,
+					REQ_BUS_OFF);
+		}
 		/*
 		 * At least the Marvell CaFe chip gets confused if we set the
 		 * voltage and set turn on power at the same time, so set the
 		 * voltage first.
 		 */
-		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
+		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) {
 			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+			if (host->ops->check_power_status)
+				host->ops->check_power_status(host,
+					REQ_BUS_ON);
+		}
 
 		pwr |= SDHCI_POWER_ON;
 
 		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host, REQ_BUS_ON);
 
 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
 			sdhci_runtime_pm_bus_on(host);
@@ -1630,6 +1801,148 @@
  *                                                                           *
 \*****************************************************************************/
 
+static int sdhci_enable(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (host->ops->platform_bus_voting)
+		host->ops->platform_bus_voting(host, 1);
+
+	return 0;
+}
+
+static int sdhci_disable(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (host->ops->platform_bus_voting)
+		host->ops->platform_bus_voting(host, 0);
+
+	return 0;
+}
+
+static void sdhci_notify_halt(struct mmc_host *mmc, bool halt)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	pr_debug("%s: halt notification was sent, halt=%d\n",
+		mmc_hostname(mmc), halt);
+	if (host->flags & SDHCI_USE_64_BIT_DMA) {
+		if (halt)
+			host->desc_sz = 16;
+		else
+			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
+	}
+}
+
+static inline void sdhci_update_power_policy(struct sdhci_host *host,
+		enum sdhci_power_policy policy)
+{
+	host->power_policy = policy;
+}
+
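+/*
+ * Translate the generic mmc load hint into a host power policy and let the
+ * platform driver react to it (e.g. by adjusting its PM QoS votes).
+ */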
+static int sdhci_notify_load(struct mmc_host *mmc, enum mmc_load state)
+{
+	int err = 0;
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	switch (state) {
+	case MMC_LOAD_HIGH:
+		sdhci_update_power_policy(host, SDHCI_PERFORMANCE_MODE);
+		break;
+	case MMC_LOAD_LOW:
+		sdhci_update_power_policy(host, SDHCI_POWER_SAVE_MODE);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	if (host->ops->notify_load)
+		err = host->ops->notify_load(host, state);
+
+	return err;
+}
+
+static bool sdhci_check_state(struct sdhci_host *host)
+{
+	if (!host->clock || !host->pwr)
+		return true;
+	else
+		return false;
+}
+
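+/*
+ * Auto tuning is only worthwhile for read-path commands running at
+ * HS200/SDR104 timings with a clock of at least 100MHz; anything else
+ * is rejected.
+ */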
+static bool sdhci_check_auto_tuning(struct sdhci_host *host,
+				  struct mmc_command *cmd)
+{
+	if (((cmd->opcode != MMC_READ_SINGLE_BLOCK) &&
+	     (cmd->opcode != MMC_READ_MULTIPLE_BLOCK) &&
+	     (cmd->opcode != SD_IO_RW_EXTENDED)) || (host->clock < 100000000))
+		return false;
+	else if (host->mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
+		 host->mmc->ios.timing == MMC_TIMING_UHS_SDR104)
+		return true;
+	else
+		return false;
+}
+
+static int sdhci_get_tuning_cmd(struct sdhci_host *host)
+{
+	if (!host->mmc || !host->mmc->card)
+		return 0;
+	/*
+	 * If we are here, all preconditions have already been met,
+	 * and the card is either eMMC or SD/SDIO.
+	 */
+	if (mmc_card_mmc(host->mmc->card))
+		return MMC_SEND_TUNING_BLOCK_HS200;
+	else
+		return MMC_SEND_TUNING_BLOCK;
+}
+
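+/*
+ * Reset the crypto engine if a controller reset invalidated its state,
+ * then let the platform hook program the crypto configuration for this
+ * request.
+ */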
+static int sdhci_crypto_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+		u32 slot)
+{
+	int err = 0;
+
+	if (host->crypto_reset_reqd && host->ops->crypto_engine_reset) {
+		err = host->ops->crypto_engine_reset(host);
+		if (err) {
+			pr_err("%s: crypto reset failed\n",
+					mmc_hostname(host->mmc));
+			goto out;
+		}
+		host->crypto_reset_reqd = false;
+	}
+
+	if (host->ops->crypto_engine_cfg) {
+		err = host->ops->crypto_engine_cfg(host, mrq, slot);
+		if (err) {
+			pr_err("%s: failed to configure crypto\n",
+					mmc_hostname(host->mmc));
+			goto out;
+		}
+	}
+out:
+	return err;
+}
+
+static int sdhci_crypto_cfg_end(struct sdhci_host *host,
+				struct mmc_request *mrq)
+{
+	int err = 0;
+
+	if (host->ops->crypto_engine_cfg_end) {
+		err = host->ops->crypto_engine_cfg_end(host, mrq);
+		if (err) {
+			pr_err("%s: failed to configure crypto\n",
+					mmc_hostname(host->mmc));
+			return err;
+		}
+	}
+	return 0;
+}
+
 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 	struct sdhci_host *host;
@@ -1638,12 +1951,39 @@
 
 	host = mmc_priv(mmc);
 
-	/* Firstly check card presence */
+	if (sdhci_check_state(host)) {
+		sdhci_dump_state(host);
+		WARN(1, "sdhci in bad state");
+		mrq->cmd->error = -EIO;
+		if (mrq->data)
+			mrq->data->error = -EIO;
+		host->mrq = NULL;
+		sdhci_dumpregs(host);
+		mmc_request_done(host->mmc, mrq);
+		return;
+	}
+
+	/*
+	 * First, check card presence via the cd-gpio.  The return value
+	 * is one of the following:
+	 *     negative: cd-gpio is not available
+	 *     zero: cd-gpio is used, and card is removed
+	 *     one: cd-gpio is used, and card is present
+	 */
 	present = mmc->ops->get_cd(mmc);
+	if (present < 0) {
+		/* If polling, assume that the card is always present. */
+		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+			present = 1;
+		else
+			present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+					SDHCI_CARD_PRESENT;
+	}
 
 	spin_lock_irqsave(&host->lock, flags);
 
-	sdhci_led_activate(host);
+	if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
+		sdhci_led_activate(host);
 
 	/*
 	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
@@ -1660,6 +2000,22 @@
 		mrq->cmd->error = -ENOMEDIUM;
 		sdhci_finish_mrq(host, mrq);
 	} else {
+		if (host->ops->config_auto_tuning_cmd) {
+			if (sdhci_check_auto_tuning(host, mrq->cmd))
+				host->ops->config_auto_tuning_cmd(host, true,
+					sdhci_get_tuning_cmd(host));
+			else
+				host->ops->config_auto_tuning_cmd(host, false,
+					sdhci_get_tuning_cmd(host));
+		}
+
+		if (host->is_crypto_en) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			if (sdhci_crypto_cfg(host, mrq, 0))
+				goto end_req;
+			spin_lock_irqsave(&host->lock, flags);
+		}
+
 		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
 			sdhci_send_command(host, mrq->sbc);
 		else
@@ -1668,6 +2024,12 @@
 
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
+	return;
+end_req:
+	mrq->cmd->error = -EIO;
+	if (mrq->data)
+		mrq->data->error = -EIO;
+	mmc_request_done(host->mmc, mrq);
 }
 
 void sdhci_set_bus_width(struct sdhci_host *host, int width)
@@ -1715,10 +2077,27 @@
 }
 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
 
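+/*
+ * Enable or disable the host IRQ line. SDHCI_HOST_IRQ_STATUS tracks the
+ * current state so repeated calls stay balanced; @sync selects between
+ * disable_irq() and disable_irq_nosync().
+ */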
+void sdhci_cfg_irq(struct sdhci_host *host, bool enable, bool sync)
+{
+	if (enable && !(host->flags & SDHCI_HOST_IRQ_STATUS)) {
+		enable_irq(host->irq);
+		host->flags |= SDHCI_HOST_IRQ_STATUS;
+	} else if (!enable && (host->flags & SDHCI_HOST_IRQ_STATUS)) {
+		if (sync)
+			disable_irq(host->irq);
+		else
+			disable_irq_nosync(host->irq);
+		host->flags &= ~SDHCI_HOST_IRQ_STATUS;
+	}
+}
+EXPORT_SYMBOL(sdhci_cfg_irq);
+
 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 {
 	struct sdhci_host *host = mmc_priv(mmc);
+	unsigned long flags;
 	u8 ctrl;
+	int ret;
 
 	if (ios->power_mode == MMC_POWER_UNDEFINED)
 		return;
@@ -1730,22 +2109,21 @@
 		return;
 	}
 
-	/*
-	 * Reset the chip on each power off.
-	 * Should clear out any weird states.
-	 */
-	if (ios->power_mode == MMC_POWER_OFF) {
-		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
-		sdhci_reinit(host);
-	}
-
 	if (host->version >= SDHCI_SPEC_300 &&
 		(ios->power_mode == MMC_POWER_UP) &&
 		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
 		sdhci_enable_preset_value(host, false);
 
-	if (!ios->clock || ios->clock != host->clock) {
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->mmc && host->mmc->card &&
+			mmc_card_sdio(host->mmc->card))
+		sdhci_cfg_irq(host, false, false);
+
+	if (ios->clock &&
+	    ((ios->clock != host->clock) || (ios->timing != host->timing))) {
+		spin_unlock_irqrestore(&host->lock, flags);
 		host->ops->set_clock(host, ios->clock);
+		spin_lock_irqsave(&host->lock, flags);
 		host->clock = ios->clock;
 
 		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
@@ -1760,11 +2138,48 @@
 			host->mmc->max_busy_timeout /= host->timeout_clk;
 		}
 	}
+	if (ios->clock && host->sdio_irq_async_status)
+		sdhci_enable_sdio_irq_nolock(host, false);
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	/*
+	 * The controller clocks may be off during power-up and we may end up
+	 * enabling card clock before giving power to the card. Hence, during
+	 * MMC_POWER_UP enable the controller clock and turn on the regulators.
+	 * mmc_power_up() provides the necessary delay before turning on the
+	 * clocks to the card.
+	 */
+	if (ios->power_mode & MMC_POWER_UP) {
+		if (host->ops->enable_controller_clock) {
+			ret = host->ops->enable_controller_clock(host);
+			if (ret) {
+				pr_err("%s: enabling controller clock: failed: %d\n",
+				       mmc_hostname(host->mmc), ret);
+			} else {
+				sdhci_set_power(host, ios->power_mode,
+						ios->vdd);
+			}
+		}
+	}
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (!host->clock) {
+		if (host->mmc && host->mmc->card &&
+				mmc_card_sdio(host->mmc->card))
+			sdhci_cfg_irq(host, true, false);
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	if (host->ops->set_power)
 		host->ops->set_power(host, ios->power_mode, ios->vdd);
 	else
-		sdhci_set_power(host, ios->power_mode, ios->vdd);
+		if (!host->ops->enable_controller_clock && (ios->power_mode &
+					(MMC_POWER_UP | MMC_POWER_ON)))
+			sdhci_set_power(host, ios->power_mode, ios->vdd);
+
+	spin_lock_irqsave(&host->lock, flags);
 
 	if (host->ops->platform_send_init_74_clocks)
 		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -1830,7 +2245,11 @@
 			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 
 			/* Re-enable SD Clock */
-			host->ops->set_clock(host, host->clock);
+			if (ios->clock) {
+				spin_unlock_irqrestore(&host->lock, flags);
+				host->ops->set_clock(host, host->clock);
+				spin_lock_irqsave(&host->lock, flags);
+			}
 		}
 
 		/* Reset SD Clock Enable */
@@ -1857,10 +2276,15 @@
 		}
 
 		/* Re-enable SD Clock */
-		host->ops->set_clock(host, host->clock);
+		if (ios->clock) {
+			spin_unlock_irqrestore(&host->lock, flags);
+			host->ops->set_clock(host, host->clock);
+			spin_lock_irqsave(&host->lock, flags);
+		}
 	} else
 		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 
+	spin_unlock_irqrestore(&host->lock, flags);
 	/*
 	 * Some (ENE) controllers go apeshit on some ios operation,
 	 * signalling timeout and CRC errors even on CMD0. Resetting
@@ -1869,6 +2293,24 @@
 	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
 		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
 
+	/*
+	 * Reset the chip on each power off.
+	 * Should clear out any weird states.
+	 */
+	if (ios->power_mode == MMC_POWER_OFF) {
+		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+		sdhci_reinit(host);
+		sdhci_set_power(host, ios->power_mode, ios->vdd);
+	}
+	if (!ios->clock)
+		host->ops->set_clock(host, ios->clock);
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->mmc && host->mmc->card &&
+			mmc_card_sdio(host->mmc->card))
+		sdhci_cfg_irq(host, true, false);
+	spin_unlock_irqrestore(&host->lock, flags);
+
 	mmiowb();
 }
 EXPORT_SYMBOL_GPL(sdhci_set_ios);
@@ -1953,16 +2395,28 @@
 
 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
 {
-	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
-		if (enable)
-			host->ier |= SDHCI_INT_CARD_INT;
-		else
-			host->ier &= ~SDHCI_INT_CARD_INT;
+	u16 ctrl = 0;
 
-		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
-		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
-		mmiowb();
+	if (host->flags & SDHCI_DEVICE_DEAD)
+		return;
+
+	if (mmc_card_and_host_support_async_int(host->mmc)) {
+		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+		if (enable)
+			ctrl |= SDHCI_CTRL_ASYNC_INT_ENABLE;
+		else
+			ctrl &= ~SDHCI_CTRL_ASYNC_INT_ENABLE;
+		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
 	}
+
+	if (enable)
+		host->ier |= SDHCI_INT_CARD_INT;
+	else
+		host->ier &= ~SDHCI_INT_CARD_INT;
+
+	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+	mmiowb();
 }
 
 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -2010,6 +2464,8 @@
 		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
 		ctrl &= ~SDHCI_CTRL_VDD_180;
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host, REQ_IO_HIGH);
 
 		if (!IS_ERR(mmc->supply.vqmmc)) {
 			ret = mmc_regulator_set_vqmmc(mmc, ios);
@@ -2049,6 +2505,8 @@
 		 */
 		ctrl |= SDHCI_CTRL_VDD_180;
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host, REQ_IO_LOW);
 
 		/* Some controller need to do more when switching */
 		if (host->ops->voltage_switch)
@@ -2256,6 +2714,17 @@
 	sdhci_reset_tuning(host);
 }
 
+static int sdhci_enhanced_strobe(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	int err = 0;
+
+	if (host->ops->enhanced_strobe)
+		err = host->ops->enhanced_strobe(host);
+
+	return err;
+}
+
 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
 	struct sdhci_host *host = mmc_priv(mmc);
@@ -2278,9 +2747,10 @@
 	switch (host->timing) {
 	/* HS400 tuning is done in HS200 mode */
 	case MMC_TIMING_MMC_HS400:
-		err = -EINVAL;
-		goto out;
-
+		if (!(mmc->caps2 & MMC_CAP2_HS400_POST_TUNING)) {
+			err = -EINVAL;
+			goto out;
+		}
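+		/* else fall through - HS400 tunes using the HS200 sequence */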
 	case MMC_TIMING_MMC_HS200:
 		/*
 		 * Periodic re-tuning for HS400 is not expected to be needed, so
@@ -2305,8 +2775,8 @@
 
 	if (host->ops->platform_execute_tuning) {
 		err = host->ops->platform_execute_tuning(host, opcode);
 		goto out;
 	}
 
 	host->mmc->retune_period = tuning_count;
 
@@ -2331,6 +2801,9 @@
 	if (host->version < SDHCI_SPEC_300)
 		return;
 
+	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_PRESET_VALUE)
+		return;
+
 	/*
 	 * We only enable or disable Preset Value if they are not already
 	 * enabled or disabled respectively. Otherwise, we bail out.
@@ -2365,6 +2838,9 @@
 			     mmc_get_dma_dir(data));
 
 	data->host_cookie = COOKIE_UNMAPPED;
+
+	if (host->ops->post_req)
+		host->ops->post_req(host, mrq);
 }
 
 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -2380,6 +2856,9 @@
 	 */
 	if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
 		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
+
+	if (host->ops->pre_req)
+		host->ops->pre_req(host, mrq);
 }
 
 static inline bool sdhci_has_requests(struct sdhci_host *host)
@@ -2430,7 +2909,27 @@
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
+static int sdhci_late_init(struct mmc_host *mmc)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (host->ops->init)
+		host->ops->init(host);
+
+	return 0;
+}
+
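+/*
+ * Debug helper: raise the given error interrupt bits through the error
+ * interrupt status force-event register.
+ */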
+static void sdhci_force_err_irq(struct mmc_host *mmc, u64 errmask)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+	u16 mask = errmask & 0xFFFF;
+
+	pr_err("%s: Force raise error mask:0x%04x\n", __func__, mask);
+	sdhci_writew(host, mask, SDHCI_SET_INT_ERROR);
+}
+
 static const struct mmc_host_ops sdhci_ops = {
+	.init           = sdhci_late_init,
 	.request	= sdhci_request,
 	.post_req	= sdhci_post_req,
 	.pre_req	= sdhci_pre_req,
@@ -2442,8 +2941,14 @@
 	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
 	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
 	.execute_tuning			= sdhci_execute_tuning,
+	.enhanced_strobe		= sdhci_enhanced_strobe,
 	.card_event			= sdhci_card_event,
 	.card_busy	= sdhci_card_busy,
+	.enable		= sdhci_enable,
+	.disable	= sdhci_disable,
+	.notify_load	= sdhci_notify_load,
+	.notify_halt	= sdhci_notify_halt,
+	.force_err_irq	= sdhci_force_err_irq,
 };
 
 /*****************************************************************************\
@@ -2552,16 +3057,22 @@
 		sdhci_do_reset(host, SDHCI_RESET_DATA);
 
 		host->pending_reset = false;
+	} else {
+		if (host->quirks2 & SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT)
+			sdhci_reset(host, SDHCI_RESET_DATA);
 	}
 
 	if (!sdhci_has_requests(host))
-		sdhci_led_deactivate(host);
+		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
+			sdhci_led_deactivate(host);
 
 	host->mrqs_done[i] = NULL;
+	host->auto_cmd_err_sts = 0;
 
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
 
+	sdhci_crypto_cfg_end(host, mrq);
 	mmc_request_done(host->mmc, mrq);
 
 	return false;
@@ -2585,8 +3096,10 @@
 	spin_lock_irqsave(&host->lock, flags);
 
 	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
+		host->mmc->err_stats[MMC_ERR_REQ_TIMEOUT]++;
 		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
 		       mmc_hostname(host->mmc));
+		MMC_TRACE(host->mmc, "Timeout waiting for h/w interrupt\n");
 		sdhci_dumpregs(host);
 
 		host->cmd->error = -ETIMEDOUT;
@@ -2608,11 +3121,18 @@
 
 	if (host->data || host->data_cmd ||
 	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
+		host->mmc->err_stats[MMC_ERR_REQ_TIMEOUT]++;
 		pr_err("%s: Timeout waiting for hardware interrupt.\n",
 		       mmc_hostname(host->mmc));
+		MMC_TRACE(host->mmc, "Timeout waiting for h/w interrupt\n");
 		sdhci_dumpregs(host);
 
 		if (host->data) {
+			pr_info("%s: bytes to transfer: %d transferred: %d\n",
+				mmc_hostname(host->mmc),
+				(host->data->blksz * host->data->blocks),
+				(sdhci_readw(host, SDHCI_BLOCK_SIZE) & 0xFFF) *
+				sdhci_readw(host, SDHCI_BLOCK_COUNT));
 			host->data->error = -ETIMEDOUT;
 			sdhci_finish_data(host);
 		} else if (host->data_cmd) {
@@ -2636,6 +3156,7 @@
 
 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
 {
+	u16 auto_cmd_status;
+
 	if (!host->cmd) {
 		/*
 		 * SDHCI recovers from errors by resetting the cmd and data
@@ -2646,16 +3167,42 @@
 			return;
 		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
 		       mmc_hostname(host->mmc), (unsigned)intmask);
+		MMC_TRACE(host->mmc,
+		"Got command interrupt 0x%08x even though no command operation was in progress.\n",
+		(unsigned int)intmask);
 		sdhci_dumpregs(host);
 		return;
 	}
 
+	trace_mmc_cmd_rw_end(host->cmd->opcode, intmask,
+				sdhci_readl(host, SDHCI_RESPONSE));
+
 	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
-		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
-		if (intmask & SDHCI_INT_TIMEOUT)
+		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX |
+		       SDHCI_INT_ACMD12ERR)) {
+		if (intmask & SDHCI_INT_TIMEOUT) {
 			host->cmd->error = -ETIMEDOUT;
-		else
+			host->mmc->err_stats[MMC_ERR_CMD_TIMEOUT]++;
+		} else {
 			host->cmd->error = -EILSEQ;
+			host->mmc->err_stats[MMC_ERR_CMD_CRC]++;
+		}
+
+		if (intmask & SDHCI_INT_ACMD12ERR) {
+			auto_cmd_status = host->auto_cmd_err_sts;
+			host->mmc->err_stats[MMC_ERR_AUTO_CMD]++;
+			pr_err_ratelimited("%s: %s: AUTO CMD err sts 0x%08x\n",
+				mmc_hostname(host->mmc), __func__,
+					auto_cmd_status);
+			if (auto_cmd_status & (SDHCI_AUTO_CMD12_NOT_EXEC |
+					       SDHCI_AUTO_CMD_INDEX_ERR |
+					       SDHCI_AUTO_CMD_ENDBIT_ERR))
+				host->cmd->error = -EIO;
+			else if (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT_ERR)
+				host->cmd->error = -ETIMEDOUT;
+			else if (auto_cmd_status & SDHCI_AUTO_CMD_CRC_ERR)
+				host->cmd->error = -EILSEQ;
+		}
 
 		/*
 		 * If this command initiates a data phase and a response
@@ -2666,10 +3213,13 @@
 		 * If the card did not receive the command or returned an
 		 * error which prevented it sending data, the data phase
 		 * will time out.
+		 *
+		 * A command INDEX or ENDBIT error is handled in the
+		 * same way.
 		 */
 		if (host->cmd->data &&
-		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
-		     SDHCI_INT_CRC) {
+		    (((intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
+		     SDHCI_INT_CRC) || (host->cmd->error == -EILSEQ))) {
 			host->cmd = NULL;
 			return;
 		}
@@ -2692,13 +3242,13 @@
 		struct sdhci_adma2_64_desc *dma_desc = desc;
 
 		if (host->flags & SDHCI_USE_64_BIT_DMA)
-			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
+			DBG("%pK: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
 			    desc, le32_to_cpu(dma_desc->addr_hi),
 			    le32_to_cpu(dma_desc->addr_lo),
 			    le16_to_cpu(dma_desc->len),
 			    le16_to_cpu(dma_desc->cmd));
 		else
-			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+			DBG("%pK: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
 			    desc, le32_to_cpu(dma_desc->addr_lo),
 			    le16_to_cpu(dma_desc->len),
 			    le16_to_cpu(dma_desc->cmd));
@@ -2713,12 +3263,16 @@
 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 {
 	u32 command;
+	bool pr_msg = false;
+
+	command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+	trace_mmc_data_rw_end(command, intmask);
 
 	/* CMD19 generates _only_ Buffer Read Ready interrupt */
 	if (intmask & SDHCI_INT_DATA_AVAIL) {
-		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
-		if (command == MMC_SEND_TUNING_BLOCK ||
-		    command == MMC_SEND_TUNING_BLOCK_HS200) {
+		if (!(host->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING) &&
+			(command == MMC_SEND_TUNING_BLOCK ||
+			command == MMC_SEND_TUNING_BLOCK_HS200)) {
 			host->tuning_done = 1;
 			wake_up(&host->buf_ready_int);
 			return;
@@ -2737,6 +3291,7 @@
 			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
 				host->data_cmd = NULL;
 				data_cmd->error = -ETIMEDOUT;
+				host->mmc->err_stats[MMC_ERR_CMD_TIMEOUT]++;
 				sdhci_finish_mrq(host, data_cmd->mrq);
 				return;
 			}
@@ -2753,6 +3308,23 @@
 				sdhci_finish_mrq(host, data_cmd->mrq);
 				return;
 			}
+			if (host->quirks2 &
+				SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD) {
+				pr_err_ratelimited("%s: %s: ignoring interrupt: 0x%08x due to DATATOUT_FOR_R1B quirk\n",
+						mmc_hostname(host->mmc),
+						__func__, intmask);
+				MMC_TRACE(host->mmc,
+					"%s: Quirk ignoring intr: 0x%08x\n",
+						__func__, intmask);
+				return;
+			}
+			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+				host->data_cmd = NULL;
+				data_cmd->error = -ETIMEDOUT;
+				host->mmc->err_stats[MMC_ERR_CMD_TIMEOUT]++;
+				sdhci_finish_mrq(host, data_cmd->mrq);
+				return;
+			}
 		}
 
 		/*
@@ -2765,30 +3337,60 @@
 
 		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
 		       mmc_hostname(host->mmc), (unsigned)intmask);
+		MMC_TRACE(host->mmc,
+		"Got data interrupt 0x%08x even though no data operation was in progress.\n",
+		(unsigned int)intmask);
 		sdhci_dumpregs(host);
 
 		return;
 	}
 
-	if (intmask & SDHCI_INT_DATA_TIMEOUT)
+	if (intmask & SDHCI_INT_DATA_TIMEOUT) {
 		host->data->error = -ETIMEDOUT;
-	else if (intmask & SDHCI_INT_DATA_END_BIT)
+		host->mmc->err_stats[MMC_ERR_DAT_TIMEOUT]++;
+	} else if (intmask & SDHCI_INT_DATA_END_BIT) {
 		host->data->error = -EILSEQ;
-	else if ((intmask & SDHCI_INT_DATA_CRC) &&
-		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
-			!= MMC_BUS_TEST_R)
+	} else if ((intmask & SDHCI_INT_DATA_CRC) &&
+		   (command != MMC_BUS_TEST_R)) {
 		host->data->error = -EILSEQ;
+		host->mmc->err_stats[MMC_ERR_DAT_CRC]++;
-	else if (intmask & SDHCI_INT_ADMA_ERROR) {
+	} else if (intmask & SDHCI_INT_ADMA_ERROR) {
 		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
 		sdhci_adma_show_error(host);
+		host->mmc->err_stats[MMC_ERR_ADMA]++;
 		host->data->error = -EIO;
 		if (host->ops->adma_workaround)
 			host->ops->adma_workaround(host, intmask);
 	}
+	if (host->data->error) {
+		if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT
+					| SDHCI_INT_DATA_END_BIT)) {
+			command = SDHCI_GET_CMD(sdhci_readw(host,
+							    SDHCI_COMMAND));
+			if ((command != MMC_SEND_TUNING_BLOCK_HS200) &&
+			    (command != MMC_SEND_TUNING_BLOCK))
+				pr_msg = true;
+		} else {
+			pr_msg = true;
+		}
+		if (pr_msg && __ratelimit(&host->dbg_dump_rs)) {
+			pr_err("%s: data txfr (0x%08x) error: %d after %lld ms\n",
+			       mmc_hostname(host->mmc), intmask,
+			       host->data->error, ktime_to_ms(ktime_sub(
+			       ktime_get(), host->data_start_time)));
+			MMC_TRACE(host->mmc,
+				"data txfr (0x%08x) error: %d after %lld ms\n",
+				intmask, host->data->error,
+				ktime_to_ms(ktime_sub(ktime_get(),
+				host->data_start_time)));
 
-	if (host->data->error)
+			if (host->mmc->ios.timing != MMC_TIMING_UHS_SDR104)
+				sdhci_dumpregs(host);
+		}
 		sdhci_finish_data(host);
-	else {
+	} else {
 		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
 			sdhci_transfer_pio(host);
 
@@ -2847,6 +3449,31 @@
 		return IRQ_NONE;
 	}
 
+	if (!host->clock && host->mmc->card &&
+			mmc_card_sdio(host->mmc->card)) {
+		if (!mmc_card_and_host_support_async_int(host->mmc)) {
+			spin_unlock(&host->lock);
+			return IRQ_NONE;
+		}
+		/*
+		 * The async card interrupt is level sensitive and can be
+		 * received while the clocks are off. If the SDIO card has
+		 * asserted it, disable host->irq here; the card interrupt
+		 * is disabled and host->irq re-enabled later.
+		 */
+
+		pr_debug("%s: %s: sdio_async intr. received\n",
+				mmc_hostname(host->mmc), __func__);
+		sdhci_cfg_irq(host, false, false);
+		host->sdio_irq_async_status = true;
+		host->thread_isr |= SDHCI_INT_CARD_INT;
+		result = IRQ_WAKE_THREAD;
+		spin_unlock(&host->lock);
+		return result;
+	}
+
 	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
 	if (!intmask || intmask == 0xffffffff) {
 		result = IRQ_NONE;
@@ -2862,6 +3489,13 @@
 				goto cont;
 		}
 
+		MMC_TRACE(host->mmc,
+			"%s: intmask: 0x%x\n", __func__, intmask);
+
+		if (intmask & SDHCI_INT_ACMD12ERR)
+			host->auto_cmd_err_sts = sdhci_readw(host,
+			SDHCI_ACMD12_ERR);
+
 		/* Clear selected interrupts. */
 		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
 				  SDHCI_INT_BUS_POWER);
@@ -2897,11 +3531,19 @@
 			result = IRQ_WAKE_THREAD;
 		}
 
-		if (intmask & SDHCI_INT_CMD_MASK)
+		if (intmask & SDHCI_INT_CMD_MASK) {
+			if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
+			    (host->clock <= 400000))
+				udelay(40);
 			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
+		}
 
-		if (intmask & SDHCI_INT_DATA_MASK)
+		if (intmask & SDHCI_INT_DATA_MASK) {
+			if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
+			    (host->clock <= 400000))
+				udelay(40);
 			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+		}
 
 		if (intmask & SDHCI_INT_BUS_POWER)
 			pr_err("%s: Card is consuming too much power!\n",
@@ -2938,6 +3580,8 @@
 	if (unexpected) {
 		pr_err("%s: Unexpected interrupt 0x%08x.\n",
 			   mmc_hostname(host->mmc), unexpected);
+		MMC_TRACE(host->mmc, "Unexpected interrupt 0x%08x.\n",
+				unexpected);
 		sdhci_dumpregs(host);
 	}
 
@@ -2966,8 +3610,11 @@
 		sdio_run_irqs(host->mmc);
 
 		spin_lock_irqsave(&host->lock, flags);
-		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
+		if (host->flags & SDHCI_SDIO_IRQ_ENABLED) {
+			if (host->sdio_irq_async_status)
+				host->sdio_irq_async_status = false;
 			sdhci_enable_sdio_irq_nolock(host, true);
+		}
 		spin_unlock_irqrestore(&host->lock, flags);
 	}
 
@@ -3322,11 +3969,34 @@
 
 	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
 
+	spin_lock_init(&host->lock);
+	ratelimit_state_init(&host->dbg_dump_rs, SDHCI_DBG_DUMP_RS_INTERVAL,
+			SDHCI_DBG_DUMP_RS_BURST);
+
 	return host;
 }
 
 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static int sdhci_is_adma2_64bit(struct sdhci_host *host)
+{
+	u32 caps;
+
+	caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
+		sdhci_readl(host, SDHCI_CAPABILITIES);
+
+	if (caps & SDHCI_CAN_64BIT)
+		return 1;
+	return 0;
+}
+#else
+static int sdhci_is_adma2_64bit(struct sdhci_host *host)
+{
+	return 0;
+}
+#endif
+
 static int sdhci_set_dma_mask(struct sdhci_host *host)
 {
 	struct mmc_host *mmc = host->mmc;
@@ -3473,6 +4143,7 @@
 int sdhci_setup_host(struct sdhci_host *host)
 {
 	struct mmc_host *mmc;
+	u32 caps[2] = {0, 0};
 	u32 max_current_caps;
 	unsigned int ocr_avail;
 	unsigned int override_timeout_clk;
@@ -3504,6 +4175,8 @@
 
 	sdhci_read_caps(host);
 
+	caps[0] = host->caps;
+
 	override_timeout_clk = host->timeout_clk;
 
 	if (host->version > SDHCI_SPEC_300) {
@@ -3541,7 +4214,7 @@
 	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
 	 * implement.
 	 */
-	if (host->caps & SDHCI_CAN_64BIT)
+	if (sdhci_is_adma2_64bit(host))
 		host->flags |= SDHCI_USE_64_BIT_DMA;
 
 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
@@ -3705,6 +4378,9 @@
 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
 	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
 
+	if (caps[0] & SDHCI_CAN_ASYNC_INT)
+		mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
+
 	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
 		host->flags |= SDHCI_AUTO_CMD12;
 
@@ -3737,7 +4413,8 @@
 
 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
 	    mmc_card_is_removable(mmc) &&
-	    mmc_gpio_get_cd(host->mmc) < 0)
+	    mmc_gpio_get_cd(host->mmc) < 0 &&
+	    !(mmc->caps2 & MMC_CAP2_NONHOTPLUG) && !host->mmc->extcon)
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
 	if (!IS_ERR(mmc->supply.vqmmc)) {
@@ -3845,10 +4522,15 @@
 	 * value.
 	 */
 	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
-	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
-		int curr = regulator_get_current_limit(mmc->supply.vmmc);
-		if (curr > 0) {
+	if (!max_current_caps) {
+		int curr = 0;
 
+		if (!IS_ERR(mmc->supply.vmmc))
+			curr = regulator_get_current_limit(mmc->supply.vmmc);
+		else if (host->ops->get_current_limit)
+			curr = host->ops->get_current_limit(host);
+
+		if (curr > 0) {
 			/* convert to SDHCI_MAX_CURRENT format */
 			curr = curr/1000;  /* convert to mA */
 			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
@@ -3923,8 +4605,6 @@
 	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
 		host->flags |= SDHCI_SIGNALING_120;
 
-	spin_lock_init(&host->lock);
-
 	/*
 	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
 	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
@@ -4043,6 +4723,8 @@
 
 	init_waitqueue_head(&host->buf_ready_int);
 
+	host->flags |= SDHCI_HOST_IRQ_STATUS;
+
 	sdhci_init(host, 0);
 
 	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
@@ -4053,31 +4735,41 @@
 		goto untasklet;
 	}
 
-	ret = sdhci_led_register(host);
-	if (ret) {
-		pr_err("%s: Failed to register LED device: %d\n",
-		       mmc_hostname(mmc), ret);
-		goto unirq;
+	if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL)) {
+		ret = sdhci_led_register(host);
+		if (ret) {
+			pr_err("%s: Failed to register LED device: %d\n",
+			       mmc_hostname(mmc), ret);
+			goto unirq;
+		}
 	}
 
 	mmiowb();
 
+	if (host->quirks2 & SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR) {
+		host->ier = (host->ier & ~SDHCI_INT_DATA_END_BIT);
+		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+	}
+
+	pr_info("%s: SDHCI controller on %s [%s] using %s in %s mode\n",
+	mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+		(host->flags & SDHCI_USE_ADMA) ?
+		((host->flags & SDHCI_USE_64_BIT_DMA) ?
+		"64-bit ADMA" : "32-bit ADMA") :
+		((host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"));
+
+	sdhci_enable_card_detection(host);
+
 	ret = mmc_add_host(mmc);
 	if (ret)
 		goto unled;
 
-	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
-		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
-		(host->flags & SDHCI_USE_ADMA) ?
-		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
-		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
-
-	sdhci_enable_card_detection(host);
-
 	return 0;
 
 unled:
-	sdhci_led_unregister(host);
+	if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
+		sdhci_led_unregister(host);
 unirq:
 	sdhci_do_reset(host, SDHCI_RESET_ALL);
 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
@@ -4134,7 +4826,8 @@
 
 	mmc_remove_host(mmc);
 
-	sdhci_led_unregister(host);
+	if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
+		sdhci_led_unregister(host);
 
 	if (!dead)
 		sdhci_do_reset(host, SDHCI_RESET_ALL);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index f0bd36c..5d9ec30 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -19,7 +19,7 @@
 #include <linux/io.h>
 #include <linux/leds.h>
 #include <linux/interrupt.h>
-
+#include <linux/ratelimit.h>
 #include <linux/mmc/host.h>
 
 /*
@@ -151,12 +151,16 @@
 #define  SDHCI_INT_ERROR_MASK	0xFFFF8000
 
 #define  SDHCI_INT_CMD_MASK	(SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
-		SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
+		SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \
+				SDHCI_INT_ACMD12ERR)
+
 #define  SDHCI_INT_DATA_MASK	(SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
 		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
 		SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
 		SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
 		SDHCI_INT_BLK_GAP)
+
+#define SDHCI_INT_CMDQ_EN	(0x1 << 14)
 #define SDHCI_INT_ALL_MASK	((unsigned int)-1)
 
 #define SDHCI_CQE_INT_ERR_MASK ( \
@@ -166,7 +170,13 @@
 
 #define SDHCI_CQE_INT_MASK (SDHCI_CQE_INT_ERR_MASK | SDHCI_INT_CQE)
 
-#define SDHCI_ACMD12_ERR	0x3C
+#define SDHCI_ACMD12_ERR		0x3C
+#define SDHCI_AUTO_CMD12_NOT_EXEC	0x0001
+#define SDHCI_AUTO_CMD_TIMEOUT_ERR	0x0002
+#define SDHCI_AUTO_CMD_CRC_ERR		0x0004
+#define SDHCI_AUTO_CMD_ENDBIT_ERR	0x0008
+#define SDHCI_AUTO_CMD_INDEX_ERR	0x0010
+#define SDHCI_AUTO_CMD12_NOT_ISSUED	0x0080
 
 #define SDHCI_HOST_CONTROL2		0x3E
 #define  SDHCI_CTRL_UHS_MASK		0x0007
@@ -184,6 +194,7 @@
 #define   SDHCI_CTRL_DRV_TYPE_D		0x0030
 #define  SDHCI_CTRL_EXEC_TUNING		0x0040
 #define  SDHCI_CTRL_TUNED_CLK		0x0080
+#define  SDHCI_CTRL_ASYNC_INT_ENABLE	0x4000
 #define  SDHCI_CTRL_PRESET_VAL_ENABLE	0x8000
 
 #define SDHCI_CAPABILITIES	0x40
@@ -205,6 +216,7 @@
 #define  SDHCI_CAN_VDD_300	0x02000000
 #define  SDHCI_CAN_VDD_180	0x04000000
 #define  SDHCI_CAN_64BIT	0x10000000
+#define  SDHCI_CAN_ASYNC_INT	0x20000000
 
 #define  SDHCI_SUPPORT_SDR50	0x00000001
 #define  SDHCI_SUPPORT_SDR104	0x00000002
@@ -346,6 +358,12 @@
 	COOKIE_MAPPED,		/* mapped by sdhci_prepare_data() */
 };
 
+enum sdhci_power_policy {
+	SDHCI_PERFORMANCE_MODE,
+	SDHCI_POWER_SAVE_MODE,
+	SDHCI_POWER_POLICY_NUM /* Always keep this one last */
+};
+
 struct sdhci_host {
 	/* Data set by hardware interface driver */
 	const char *hw_name;	/* Hardware bus name */
@@ -451,6 +469,83 @@
  */
 #define SDHCI_QUIRK2_DISABLE_HW_TIMEOUT			(1<<17)
 
+/*
+ * Read Transfer Active/ Write Transfer Active may be not
+ * de-asserted after end of transaction. Issue reset for DAT line.
+ */
+#define SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT                 (1<<18)
+/*
+ * Slow interrupt clearance at 400KHz may cause the host
+ * controller driver's interrupt handler to be called twice.
+ */
+#define SDHCI_QUIRK2_SLOW_INT_CLR                       (1<<19)
+
+/*
+ * If the base clock is scalable, there should be no further clock
+ * division, as the input clock itself will be scaled down to the
+ * required frequency.
+ */
+#define SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK		(1<<20)
+
+/*
+ * Ignore data timeout errors for R1B commands, as there is no data
+ * associated and the busy timeout value for these commands could be
+ * larger than the maximum timeout value that the controller can
+ * handle.
+ */
+#define SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD		(1<<21)
+
+/*
+ * The preset value registers are not properly initialized by
+ * some hardware, and hence preset values must not be enabled for
+ * such controllers.
+ */
+#define SDHCI_QUIRK2_BROKEN_PRESET_VALUE		(1<<22)
+/*
+ * Some controllers define the usage of 0xF in the data timeout
+ * counter register (0x2E), which is actually a reserved value as
+ * per the specification.
+ */
+#define SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT		(1<<23)
+/*
+ * This is applicable to controllers that advertise a timeout clock
+ * value in the capabilities register (bits 5-0) of just 50MHz, whereas
+ * the base clock frequency is 200MHz. The controller internally
+ * multiplies the value in the timeout control register by 4 on the
+ * assumption that the driver always uses the fixed timeout clock value
+ * from the capabilities register to calculate the timeout. But when the
+ * driver uses SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK, the base clock
+ * frequency is controlled directly by the driver and its rate varies
+ * up to a maximum of 200MHz. This quirk is used in such cases to
+ * compensate for the controller's multiplication when the timeout is
+ * calculated from the base clock.
+ */
+#define SDHCI_QUIRK2_DIVIDE_TOUT_BY_4 (1 << 24)
+
+/*
+ * Some SDHC controllers are unable to handle a data-end bit error in
+ * the 1-bit mode of SDIO.
+ */
+#define SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR             (1<<25)
+
+/* Use the reset workaround in case the SDHCI reset times out */
+#define SDHCI_QUIRK2_USE_RESET_WORKAROUND		(1<<26)
+
+/* Some controllers don't have any LED control */
+#define SDHCI_QUIRK2_BROKEN_LED_CONTROL			(1<<27)
+
+/*
+ * Some controllers don't follow the tuning procedure defined in the spec.
+ * The tuning data has to be compared by the SW driver to validate the
+ * correct phase.
+ */
+#define SDHCI_QUIRK2_NON_STANDARD_TUNING (1 << 28)
+/*
+ * Some controllers may use PIO mode to workaround HW issues in ADMA for
+ * eMMC tuning commands.
+ */
+#define SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING (1 << 29)
+
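Each quirk above gates driver behaviour with a bitwise test against host->quirks2. As a hedged example, the divide-by-4 compensation for SDHCI_QUIRK2_DIVIDE_TOUT_BY_4 could be applied where the timeout count is computed (the count variable is illustrative):

	/* Compensate for the controller's internal multiplication by 4 */
	if (host->quirks2 & SDHCI_QUIRK2_DIVIDE_TOUT_BY_4)
		count = DIV_ROUND_UP(count, 4);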
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
 	char *bounce_buffer;	/* For packing SDMA reads/writes */
@@ -463,6 +558,7 @@
 	struct mmc_host *mmc;	/* MMC structure */
 	struct mmc_host_ops mmc_host_ops;	/* MMC host ops */
 	u64 dma_mask;		/* custom DMA mask */
+	u64 coherent_dma_mask;
 
 #if IS_ENABLED(CONFIG_LEDS_CLASS)
 	struct led_classdev led;	/* LED control */
@@ -486,6 +582,7 @@
 #define SDHCI_SIGNALING_330	(1<<14)	/* Host is capable of 3.3V signaling */
 #define SDHCI_SIGNALING_180	(1<<15)	/* Host is capable of 1.8V signaling */
 #define SDHCI_SIGNALING_120	(1<<16)	/* Host is capable of 1.2V signaling */
+#define SDHCI_HOST_IRQ_STATUS	(1<<17) /* host->irq status */
 
 	unsigned int version;	/* SDHCI spec. version */
 
@@ -501,8 +598,10 @@
 	bool preset_enabled;	/* Preset is enabled */
 	bool pending_reset;	/* Cmd/data reset is pending */
 	bool irq_wake_enabled;	/* IRQ wakeup is enabled */
+	bool cdr_support;
 
 	struct mmc_request *mrqs_done[SDHCI_MAX_MRQS];	/* Requests done */
+	struct mmc_request *mrq;	/* Current request */
 	struct mmc_command *cmd;	/* Current command */
 	struct mmc_command *data_cmd;	/* Current data command */
 	struct mmc_data *data;	/* Current data request */
@@ -565,6 +664,20 @@
 
 	u64			data_timeout;
 
+	ktime_t data_start_time;
+
+	enum sdhci_power_policy power_policy;
+
+	bool sdio_irq_async_status;
+	bool is_crypto_en;
+	bool crypto_reset_reqd;
+
+	u32 auto_cmd_err_sts;
+	struct ratelimit_state dbg_dump_rs;
+	int reset_wa_applied; /* reset workaround status */
+	ktime_t reset_wa_t; /* time when the reset workaround is applied */
+	int reset_wa_cnt; /* total number of times workaround is used */
+
 	unsigned long private[0] ____cacheline_aligned;
 };
 
@@ -598,11 +711,44 @@
 	unsigned int    (*get_ro)(struct sdhci_host *host);
 	void		(*reset)(struct sdhci_host *host, u8 mask);
 	int	(*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+	int	(*crypto_engine_cfg)(struct sdhci_host *host,
+			struct mmc_request *mrq, u32 slot);
+	int	(*crypto_engine_cmdq_cfg)(struct sdhci_host *host,
+			struct mmc_request *mrq, u32 slot, u64 *ice_ctx);
+	int	(*crypto_engine_cfg_end)(struct sdhci_host *host,
+			struct mmc_request *mrq);
+	int	(*crypto_engine_reset)(struct sdhci_host *host);
+	void	(*crypto_cfg_reset)(struct sdhci_host *host, unsigned int slot);
 	void	(*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
 	void	(*hw_reset)(struct sdhci_host *host);
 	void    (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+	unsigned int	(*get_max_segments)(void);
+#define REQ_BUS_OFF     (1 << 0)
+#define REQ_BUS_ON      (1 << 1)
+#define REQ_IO_LOW      (1 << 2)
+#define REQ_IO_HIGH     (1 << 3)
 	void    (*card_event)(struct sdhci_host *host);
+	int	(*enhanced_strobe)(struct sdhci_host *host);
+	void    (*platform_bus_voting)(struct sdhci_host *host, u32 enable);
+	void	(*toggle_cdr)(struct sdhci_host *host, bool enable);
+	void    (*check_power_status)(struct sdhci_host *host, u32 req_type);
+	int     (*config_auto_tuning_cmd)(struct sdhci_host *host,
+					bool enable, u32 type);
+	int     (*enable_controller_clock)(struct sdhci_host *host);
+	void	(*clear_set_dumpregs)(struct sdhci_host *host, bool set);
+	void	(*enhanced_strobe_mask)(struct sdhci_host *host, bool set);
+	void    (*dump_vendor_regs)(struct sdhci_host *host);
 	void	(*voltage_switch)(struct sdhci_host *host);
+	int	(*select_drive_strength)(struct sdhci_host *host,
+					 struct mmc_card *card,
+					 unsigned int max_dtr, int host_drv,
+					 int card_drv, int *drv_type);
+	int	(*notify_load)(struct sdhci_host *host, enum mmc_load state);
+	void	(*reset_workaround)(struct sdhci_host *host, u32 enable);
+	void	(*init)(struct sdhci_host *host);
+	void	(*pre_req)(struct sdhci_host *host, struct mmc_request *req);
+	void	(*post_req)(struct sdhci_host *host, struct mmc_request *req);
+	unsigned int	(*get_current_limit)(struct sdhci_host *host);
 };
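The new notify_load hook pairs naturally with the sdhci_power_policy enum added earlier. A minimal sketch of a vendor implementation, assuming enum mmc_load provides MMC_LOAD_HIGH/MMC_LOAD_LOW as implied by the hook's signature (the function name is hypothetical):

static int my_sdhci_notify_load(struct sdhci_host *host,
				enum mmc_load state)
{
	host->power_policy = (state == MMC_LOAD_HIGH) ?
		SDHCI_PERFORMANCE_MODE : SDHCI_POWER_SAVE_MODE;
	return 0;
}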
 
 #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -753,4 +899,5 @@
 void sdhci_reset_tuning(struct sdhci_host *host);
 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
 
+void sdhci_cfg_irq(struct sdhci_host *host, bool enable, bool sync);
 #endif /* __SDHCI_HW_H */
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index d8f7e37..e5f069f 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -274,7 +274,6 @@
 }
 EXPORT_SYMBOL(cnss_wlan_disable);
 
-#ifdef CONFIG_CNSS2_DEBUG
 int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type,
 		      u32 data_len, u8 *output)
 {
@@ -332,21 +331,6 @@
 	return ret;
 }
 EXPORT_SYMBOL(cnss_athdiag_write);
-#else
-int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type,
-		      u32 data_len, u8 *output)
-{
-	return -EPERM;
-}
-EXPORT_SYMBOL(cnss_athdiag_read);
-
-int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type,
-		       u32 data_len, u8 *input)
-{
-	return -EPERM;
-}
-EXPORT_SYMBOL(cnss_athdiag_write);
-#endif
 
 int cnss_set_fw_log_mode(struct device *dev, u8 fw_log_mode)
 {
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 491294b..8ca4f68 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -1546,9 +1546,8 @@
 	if (!plat_priv)
 		return -ENODEV;
 
-	if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
-		cnss_pr_err("RDDM already collected 0x%x, return\n",
-			    pci_priv->mhi_state);
+	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev)) {
+		cnss_pr_info("Device is already in bad state, ignore force assert\n");
 		return 0;
 	}
 
@@ -2284,6 +2283,7 @@
 	case CNSS_MHI_POWER_OFF:
 	case CNSS_MHI_FORCE_POWER_OFF:
 		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
 		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
 		break;
 	case CNSS_MHI_SUSPEND:
@@ -2293,6 +2293,7 @@
 		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
 		break;
 	case CNSS_MHI_TRIGGER_RDDM:
+		set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
 		break;
 	case CNSS_MHI_RDDM_DONE:
 		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index b065eb6..26e114f 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -59,3 +59,11 @@
 source "drivers/nfc/s3fwrn5/Kconfig"
 source "drivers/nfc/st95hf/Kconfig"
 endmenu
+
+config NFC_NQ
+	tristate "QTI NCI based NFC Controller Driver for NQx"
+	depends on I2C
+	help
+	  This enables the NFC driver for NQx based devices.
+	  This is for the I2C connected version. The NCI protocol logic
+	  resides in user mode and it has no other NFC dependencies.
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 5393ba5..5661fd6 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -17,3 +17,4 @@
 obj-$(CONFIG_NFC_NXP_NCI)	+= nxp-nci/
 obj-$(CONFIG_NFC_S3FWRN5)	+= s3fwrn5/
 obj-$(CONFIG_NFC_ST95HF)	+= st95hf/
+obj-$(CONFIG_NFC_NQ)		+= nq-nci.o
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
new file mode 100644
index 0000000..354bece
--- /dev/null
+++ b/drivers/nfc/nq-nci.c
@@ -0,0 +1,1425 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/uaccess.h>
+#include "nq-nci.h"
+#include <linux/clk.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+struct nqx_platform_data {
+	unsigned int irq_gpio;
+	unsigned int en_gpio;
+	unsigned int clkreq_gpio;
+	unsigned int firm_gpio;
+	unsigned int ese_gpio;
+	const char *clk_src_name;
+	/* NFC_CLK pin voting state */
+	bool clk_pin_voting;
+};
+
+static const struct of_device_id msm_match_table[] = {
+	{.compatible = "qcom,nq-nci"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_match_table);
+
+#define DEV_COUNT	1
+#define DEVICE_NAME	"nq-nci"
+#define CLASS_NAME	"nqx"
+#define MAX_BUFFER_SIZE			(320)
+#define WAKEUP_SRC_TIMEOUT		(2000)
+#define MAX_RETRY_COUNT			3
+#define NCI_RESET_CMD_LEN		4
+#define NCI_INIT_CMD_LEN		3
+#define NCI_RESET_RSP_LEN		6
+#define NCI_INIT_RSP_LEN		28
+#define NCI_GET_VERSION_CMD_LEN		8
+#define NCI_GET_VERSION_RSP_LEN		12
+
+struct nqx_dev {
+	wait_queue_head_t	read_wq;
+	struct	mutex		read_mutex;
+	struct	i2c_client	*client;
+	dev_t			devno;
+	struct class		*nqx_class;
+	struct device		*nqx_device;
+	struct cdev		c_dev;
+	union  nqx_uinfo	nqx_info;
+	/* NFC GPIO variables */
+	unsigned int		irq_gpio;
+	unsigned int		en_gpio;
+	unsigned int		firm_gpio;
+	unsigned int		clkreq_gpio;
+	unsigned int		ese_gpio;
+	/* NFC VEN pin state powered by Nfc */
+	bool			nfc_ven_enabled;
+	/* NFC_IRQ state */
+	bool			irq_enabled;
+	/* NFC_IRQ wake-up state */
+	bool			irq_wake_up;
+	spinlock_t		irq_enabled_lock;
+	unsigned int		count_irq;
+	/* Initial CORE RESET notification */
+	unsigned int		core_reset_ntf;
+	/* CLK control */
+	bool			clk_run;
+	struct	clk		*s_clk;
+	/* read buffer*/
+	size_t kbuflen;
+	u8 *kbuf;
+	struct nqx_platform_data *pdata;
+};
+
+static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
+			void *v);
+/* clock enable function */
+static int nqx_clock_select(struct nqx_dev *nqx_dev);
+/* clock disable function */
+static int nqx_clock_deselect(struct nqx_dev *nqx_dev);
+static struct notifier_block nfcc_notifier = {
+	.notifier_call	= nfcc_reboot,
+	.next			= NULL,
+	.priority		= 0
+};
+
+static unsigned int disable_ctrl;
+
+static void nqx_init_stat(struct nqx_dev *nqx_dev)
+{
+	nqx_dev->count_irq = 0;
+}
+
+static void nqx_disable_irq(struct nqx_dev *nqx_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	if (nqx_dev->irq_enabled) {
+		disable_irq_nosync(nqx_dev->client->irq);
+		nqx_dev->irq_enabled = false;
+	}
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+}
+
+/**
+ * nqx_enable_irq()
+ *
+ * Enable the interrupt if it is not already enabled
+ *
+ * Return: void
+ */
+static void nqx_enable_irq(struct nqx_dev *nqx_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	if (!nqx_dev->irq_enabled) {
+		nqx_dev->irq_enabled = true;
+		enable_irq(nqx_dev->client->irq);
+	}
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+}
+
+static irqreturn_t nqx_dev_irq_handler(int irq, void *dev_id)
+{
+	struct nqx_dev *nqx_dev = dev_id;
+	unsigned long flags;
+
+	if (device_may_wakeup(&nqx_dev->client->dev))
+		pm_wakeup_event(&nqx_dev->client->dev, WAKEUP_SRC_TIMEOUT);
+
+	nqx_disable_irq(nqx_dev);
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	nqx_dev->count_irq++;
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+	wake_up(&nqx_dev->read_wq);
+
+	return IRQ_HANDLED;
+}
+
+static ssize_t nfc_read(struct file *filp, char __user *buf,
+					size_t count, loff_t *offset)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+	unsigned char *tmp = NULL;
+	int ret;
+	int irq_gpio_val = 0;
+
+	if (!nqx_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (count > nqx_dev->kbuflen)
+		count = nqx_dev->kbuflen;
+
+	dev_dbg(&nqx_dev->client->dev, "%s : reading %zu bytes.\n",
+			__func__, count);
+
+	mutex_lock(&nqx_dev->read_mutex);
+
+	irq_gpio_val = gpio_get_value(nqx_dev->irq_gpio);
+	if (irq_gpio_val == 0) {
+		if (filp->f_flags & O_NONBLOCK) {
+			dev_err(&nqx_dev->client->dev,
+			"f_flags has O_NONBLOCK. EAGAIN\n");
+			ret = -EAGAIN;
+			goto err;
+		}
+		while (1) {
+			ret = 0;
+			if (!nqx_dev->irq_enabled) {
+				nqx_dev->irq_enabled = true;
+				enable_irq(nqx_dev->client->irq);
+			}
+			if (!gpio_get_value(nqx_dev->irq_gpio)) {
+				ret = wait_event_interruptible(nqx_dev->read_wq,
+					!nqx_dev->irq_enabled);
+			}
+			if (ret)
+				goto err;
+			nqx_disable_irq(nqx_dev);
+
+			if (gpio_get_value(nqx_dev->irq_gpio))
+				break;
+			dev_err_ratelimited(&nqx_dev->client->dev,
+			"gpio is low, no need to read data\n");
+		}
+	}
+
+	tmp = nqx_dev->kbuf;
+	if (!tmp) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: device doesn't exist anymore\n", __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+	memset(tmp, 0x00, count);
+
+	/* Read data */
+	ret = i2c_master_recv(nqx_dev->client, tmp, count);
+	if (ret < 0) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: i2c_master_recv returned %d\n", __func__, ret);
+		goto err;
+	}
+	if (ret > count) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: received too many bytes from i2c (%d)\n",
+			__func__, ret);
+		ret = -EIO;
+		goto err;
+	}
+#ifdef NFC_KERNEL_BU
+	dev_dbg(&nqx_dev->client->dev, "%s : NfcNciRx %x %x %x\n",
+		__func__, tmp[0], tmp[1], tmp[2]);
+#endif
+	if (copy_to_user(buf, tmp, ret)) {
+		dev_warn(&nqx_dev->client->dev,
+			"%s : failed to copy to user space\n", __func__);
+		ret = -EFAULT;
+		goto err;
+	}
+	mutex_unlock(&nqx_dev->read_mutex);
+	return ret;
+
+err:
+	mutex_unlock(&nqx_dev->read_mutex);
+out:
+	return ret;
+}
+
+static ssize_t nfc_write(struct file *filp, const char __user *buf,
+				size_t count, loff_t *offset)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+	char *tmp = NULL;
+	int ret = 0;
+
+	if (!nqx_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (count > nqx_dev->kbuflen) {
+		dev_err(&nqx_dev->client->dev, "%s: out of memory\n",
+			__func__);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	tmp = memdup_user(buf, count);
+	if (IS_ERR(tmp)) {
+		dev_err(&nqx_dev->client->dev, "%s: memdup_user failed\n",
+			__func__);
+		ret = PTR_ERR(tmp);
+		goto out;
+	}
+
+	ret = i2c_master_send(nqx_dev->client, tmp, count);
+	if (ret != count) {
+		dev_err(&nqx_dev->client->dev,
+		"%s: failed to write %d\n", __func__, ret);
+		ret = -EIO;
+		goto out_free;
+	}
+#ifdef NFC_KERNEL_BU
+	dev_dbg(&nqx_dev->client->dev,
+			"%s : i2c-%d: NfcNciTx %x %x %x\n",
+			__func__, iminor(file_inode(filp)),
+			tmp[0], tmp[1], tmp[2]);
+#endif
+	usleep_range(1000, 1100);
+out_free:
+	kfree(tmp);
+out:
+	return ret;
+}
+
+/**
+ * nqx_standby_write()
+ * @buf:       pointer to data buffer
+ * @len:       number of bytes to transfer
+ *
+ * Write the data buffer over I2C, retrying if the NFCC
+ * is in standby mode.
+ *
+ * Return: number of bytes written, or a negative value on error
+ */
+static int nqx_standby_write(struct nqx_dev *nqx_dev,
+				const unsigned char *buf, size_t len)
+{
+	int ret = -EINVAL;
+	int retry_cnt;
+
+	for (retry_cnt = 1; retry_cnt <= MAX_RETRY_COUNT; retry_cnt++) {
+		ret = i2c_master_send(nqx_dev->client, buf, len);
+		if (ret < 0) {
+			dev_err(&nqx_dev->client->dev,
+				"%s: write failed, Maybe in Standby Mode - Retry(%d)\n",
+				 __func__, retry_cnt);
+			usleep_range(1000, 1100);
+		} else if (ret == len) {
+			break;
+		}
+	}
+	return ret;
+}
+
+/*
+ * Power management of the eSE
+ * NFC & eSE ON : NFC_EN high and eSE_pwr_req high.
+ * NFC OFF & eSE ON : NFC_EN high and eSE_pwr_req high.
+ * NFC OFF & eSE OFF : NFC_EN low and eSE_pwr_req low.
+ */
+static int nqx_ese_pwr(struct nqx_dev *nqx_dev, unsigned long arg)
+{
+	int r = -1;
+	const unsigned char svdd_off_cmd_warn[] =  {0x2F, 0x31, 0x01, 0x01};
+	const unsigned char svdd_off_cmd_done[] =  {0x2F, 0x31, 0x01, 0x00};
+
+	if (!gpio_is_valid(nqx_dev->ese_gpio)) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: ese_gpio is not valid\n", __func__);
+		return -EINVAL;
+	}
+
+	if (arg == 0) {
+		/*
+		 * We want to power on the eSE and to do so we need the
+		 * eSE_pwr_req pin and the NFC_EN pin to be high
+		 */
+		if (gpio_get_value(nqx_dev->ese_gpio)) {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio is already high\n");
+			r = 0;
+		} else {
+			/**
+			 * Let's store the NFC_EN pin state
+			 * only if the eSE is not yet on
+			 */
+			nqx_dev->nfc_ven_enabled =
+					gpio_get_value(nqx_dev->en_gpio);
+			if (!nqx_dev->nfc_ven_enabled) {
+				gpio_set_value(nqx_dev->en_gpio, 1);
+				/* hardware dependent delay */
+				usleep_range(1000, 1100);
+			}
+			gpio_set_value(nqx_dev->ese_gpio, 1);
+			usleep_range(1000, 1100);
+			if (gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_dbg(&nqx_dev->client->dev, "ese_gpio is enabled\n");
+				r = 0;
+			}
+		}
+	} else if (arg == 1) {
+		if (nqx_dev->nfc_ven_enabled &&
+			((nqx_dev->nqx_info.info.chip_type == NFCC_NQ_220) ||
+			(nqx_dev->nqx_info.info.chip_type == NFCC_PN66T))) {
+			/**
+			 * Let's inform the CLF we're
+			 * powering off the eSE
+			 */
+			r = nqx_standby_write(nqx_dev, svdd_off_cmd_warn,
+						sizeof(svdd_off_cmd_warn));
+			if (r < 0) {
+				dev_err(&nqx_dev->client->dev,
+					"%s: write failed after max retry\n",
+					 __func__);
+				return -ENXIO;
+			}
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: svdd_off_cmd_warn sent\n", __func__);
+
+			/* let's power down the eSE */
+			gpio_set_value(nqx_dev->ese_gpio, 0);
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: nqx_dev->ese_gpio set to 0\n", __func__);
+
+			/**
+			 * Time needed for the SVDD capacitor
+			 * to get discharged
+			 */
+			usleep_range(8000, 8100);
+
+			/* Let's inform the CLF the eSE is now off */
+			r = nqx_standby_write(nqx_dev, svdd_off_cmd_done,
+						sizeof(svdd_off_cmd_done));
+			if (r < 0) {
+				dev_err(&nqx_dev->client->dev,
+					"%s: write failed after max retry\n",
+					 __func__);
+				return -ENXIO;
+			}
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: svdd_off_cmd_done sent\n", __func__);
+		} else {
+			/**
+			 * In case the NFC is off,
+			 * there's no need to send the i2c commands
+			 */
+			gpio_set_value(nqx_dev->ese_gpio, 0);
+			usleep_range(1000, 1100);
+		}
+
+		if (!gpio_get_value(nqx_dev->ese_gpio)) {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio is disabled\n");
+			r = 0;
+		}
+
+		if (!nqx_dev->nfc_ven_enabled) {
+			/* hardware dependent delay */
+			usleep_range(1000, 1100);
+			dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
+			gpio_set_value(nqx_dev->en_gpio, 0);
+		}
+	} else if (arg == 3) {
+		r = gpio_get_value(nqx_dev->ese_gpio);
+	}
+	return r;
+}
+
+static int nfc_open(struct inode *inode, struct file *filp)
+{
+	struct nqx_dev *nqx_dev = container_of(inode->i_cdev,
+				struct nqx_dev, c_dev);
+
+	filp->private_data = nqx_dev;
+	nqx_init_stat(nqx_dev);
+
+	dev_dbg(&nqx_dev->client->dev,
+			"%s: %d,%d\n", __func__, imajor(inode), iminor(inode));
+	return 0;
+}
+
+/*
+ * nfc_ioctl_power_states() - power control
+ * @filp:	pointer to the file descriptor
+ * @arg:	mode that we want to move to
+ *
+ * Device power control. Depending on the arg value, device moves to
+ * different states
+ * (arg = 0): NFC_ENABLE	GPIO = 0, FW_DL GPIO = 0
+ * (arg = 1): NFC_ENABLE	GPIO = 1, FW_DL GPIO = 0
+ * (arg = 2): FW_DL GPIO = 1 (VEN toggled to enter download mode)
+ * (arg = 4): FW_DL GPIO = 1 (SN100U, before firmware download starts)
+ * (arg = 6): FW_DL GPIO = 0 (SN100U, after firmware download finishes)
+ *
+ * Return: -ENOIOCTLCMD if arg is not supported, 0 in any other case
+ */
+int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
+{
+	int r = 0;
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	if (arg == 0) {
+		/*
+		 * We are attempting a hardware reset so let us disable
+		 * interrupts to avoid spurious notifications to upper
+		 * layers.
+		 */
+		nqx_disable_irq(nqx_dev);
+		dev_dbg(&nqx_dev->client->dev,
+			"gpio_set_value disable: %s: info: %p\n",
+			__func__, nqx_dev);
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
+			gpio_set_value(nqx_dev->firm_gpio, 0);
+			usleep_range(10000, 10100);
+		}
+
+		if (gpio_is_valid(nqx_dev->ese_gpio)) {
+			if (!gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
+				gpio_set_value(nqx_dev->en_gpio, 0);
+				usleep_range(10000, 10100);
+			} else {
+				dev_dbg(&nqx_dev->client->dev, "keeping en_gpio high\n");
+			}
+		} else {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio invalid, set en_gpio to low\n");
+			gpio_set_value(nqx_dev->en_gpio, 0);
+			usleep_range(10000, 10100);
+		}
+		if (nqx_dev->pdata->clk_pin_voting) {
+			r = nqx_clock_deselect(nqx_dev);
+			if (r < 0)
+				dev_err(&nqx_dev->client->dev, "unable to disable clock\n");
+		}
+		nqx_dev->nfc_ven_enabled = false;
+	} else if (arg == 1) {
+		nqx_enable_irq(nqx_dev);
+		dev_dbg(&nqx_dev->client->dev,
+			"gpio_set_value enable: %s: info: %p\n",
+			__func__, nqx_dev);
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
+			gpio_set_value(nqx_dev->firm_gpio, 0);
+			usleep_range(10000, 10100);
+		}
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		usleep_range(10000, 10100);
+		if (nqx_dev->pdata->clk_pin_voting) {
+			r = nqx_clock_select(nqx_dev);
+			if (r < 0)
+				dev_err(&nqx_dev->client->dev, "unable to enable clock\n");
+		}
+		nqx_dev->nfc_ven_enabled = true;
+	} else if (arg == 2) {
+		/*
+		 * We are switching to download mode; toggle the enable pin
+		 * in order to put the NFCC into the new mode
+		 */
+		if (gpio_is_valid(nqx_dev->ese_gpio)) {
+			if (gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_err(&nqx_dev->client->dev,
+				"FW download forbidden while ese is on\n");
+				return -EBUSY; /* Device or resource busy */
+			}
+		}
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		usleep_range(10000, 10100);
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
+			gpio_set_value(nqx_dev->firm_gpio, 1);
+			usleep_range(10000, 10100);
+		}
+		gpio_set_value(nqx_dev->en_gpio, 0);
+		usleep_range(10000, 10100);
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		usleep_range(10000, 10100);
+	} else if (arg == 4) {
+		/*
+		 * Setting firmware download gpio to HIGH for SN100U
+		 * before FW download start
+		 */
+		dev_dbg(&nqx_dev->client->dev, "SN100 fw gpio HIGH\n");
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
+			gpio_set_value(nqx_dev->firm_gpio, 1);
+			usleep_range(10000, 10100);
+		} else {
+			dev_err(&nqx_dev->client->dev,
+				"firm_gpio is invalid\n");
+		}
+	} else if (arg == 6) {
+		/*
+		 * Setting firmware download gpio to LOW for SN100U
+		 * FW download finished
+		 */
+		dev_dbg(&nqx_dev->client->dev, "SN100 fw gpio LOW\n");
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
+			gpio_set_value(nqx_dev->firm_gpio, 0);
+			usleep_range(10000, 10100);
+		} else {
+			dev_err(&nqx_dev->client->dev,
+				"firm_gpio is invalid\n");
+		}
+	} else {
+		r = -ENOIOCTLCMD;
+	}
+
+	return r;
+}
+
+#ifdef CONFIG_COMPAT
+static long nfc_compat_ioctl(struct file *pfile, unsigned int cmd,
+				unsigned long arg)
+{
+	long r = 0;
+
+	arg = (compat_u64)arg;
+	switch (cmd) {
+	case NFC_SET_PWR:
+		nfc_ioctl_power_states(pfile, arg);
+		break;
+	case ESE_SET_PWR:
+		nqx_ese_pwr(pfile->private_data, arg);
+		break;
+	case ESE_GET_PWR:
+		nqx_ese_pwr(pfile->private_data, 3);
+		break;
+	case SET_RX_BLOCK:
+		break;
+	case SET_EMULATOR_TEST_POINT:
+		break;
+	default:
+		r = -ENOTTY;
+	}
+	return r;
+}
+#endif
+
+/*
+ * nfc_ioctl_core_reset_ntf()
+ * @filp:       pointer to the file descriptor
+ *
+ * Allows callers to determine if a CORE_RESET_NTF has arrived
+ *
+ * Return: the value of variable core_reset_ntf
+ */
+int nfc_ioctl_core_reset_ntf(struct file *filp)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	dev_dbg(&nqx_dev->client->dev, "%s: returning = %d\n", __func__,
+		nqx_dev->core_reset_ntf);
+	return nqx_dev->core_reset_ntf;
+}
+
+/*
+ * nfc_ioctl_nfcc_info()
+ *
+ * Return the NQ chipset and firmware version details
+ */
+unsigned int nfc_ioctl_nfcc_info(struct file *filp, unsigned long arg)
+{
+	unsigned int r = 0;
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	r = nqx_dev->nqx_info.i;
+	dev_dbg(&nqx_dev->client->dev,
+		"nqx nfc : %s r = %d\n", __func__, r);
+
+	return r;
+}
+
+static long nfc_ioctl(struct file *pfile, unsigned int cmd,
+			unsigned long arg)
+{
+	int r = 0;
+
+	switch (cmd) {
+	case NFC_SET_PWR:
+		r = nfc_ioctl_power_states(pfile, arg);
+		break;
+	case ESE_SET_PWR:
+		r = nqx_ese_pwr(pfile->private_data, arg);
+		break;
+	case ESE_GET_PWR:
+		r = nqx_ese_pwr(pfile->private_data, 3);
+		break;
+	case SET_RX_BLOCK:
+		break;
+	case SET_EMULATOR_TEST_POINT:
+		break;
+	case NFCC_INITIAL_CORE_RESET_NTF:
+		r = nfc_ioctl_core_reset_ntf(pfile);
+		break;
+	case NFCC_GET_INFO:
+		r = nfc_ioctl_nfcc_info(pfile, arg);
+		break;
+	default:
+		r = -ENOIOCTLCMD;
+	}
+	return r;
+}
+
+static const struct file_operations nfc_dev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.read  = nfc_read,
+	.write = nfc_write,
+	.open = nfc_open,
+	.unlocked_ioctl = nfc_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = nfc_compat_ioctl
+#endif
+};
+
+/* Check for availability of NQx NFC controller hardware */
+static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
+{
+	int ret = 0;
+
+	int gpio_retry_count = 0;
+	unsigned char init_rsp_len = 0;
+	unsigned int enable_gpio = nqx_dev->en_gpio;
+	char *nci_reset_cmd = NULL;
+	char *nci_init_cmd = NULL;
+	char *nci_init_rsp = NULL;
+	char *nci_reset_rsp = NULL;
+	char *nci_get_version_cmd = NULL;
+	char *nci_get_version_rsp = NULL;
+
+	nci_reset_cmd = kzalloc(NCI_RESET_CMD_LEN + 1, GFP_DMA | GFP_KERNEL);
+	if (!nci_reset_cmd) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	nci_reset_rsp = kzalloc(NCI_RESET_RSP_LEN + 1,  GFP_DMA | GFP_KERNEL);
+	if (!nci_reset_rsp) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	nci_init_cmd = kzalloc(NCI_INIT_CMD_LEN + 1,  GFP_DMA | GFP_KERNEL);
+	if (!nci_init_cmd) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	nci_init_rsp = kzalloc(NCI_INIT_RSP_LEN + 1,  GFP_DMA | GFP_KERNEL);
+	if (!nci_init_rsp) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	nci_get_version_cmd = kzalloc(NCI_GET_VERSION_CMD_LEN + 1,
+					GFP_DMA | GFP_KERNEL);
+	if (!nci_get_version_cmd) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	nci_get_version_rsp = kzalloc(NCI_GET_VERSION_RSP_LEN + 1,
+					GFP_DMA | GFP_KERNEL);
+	if (!nci_get_version_rsp) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+reset_enable_gpio:
+	/* Make sure that the NFCC starts in a clean state. */
+	gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
+	/* hardware dependent delay */
+	usleep_range(10000, 10100);
+	gpio_set_value(enable_gpio, 1);/* HPD : Enable*/
+	/* hardware dependent delay */
+	usleep_range(10000, 10100);
+
+	nci_reset_cmd[0] = 0x20;
+	nci_reset_cmd[1] = 0x00;
+	nci_reset_cmd[2] = 0x01;
+	nci_reset_cmd[3] = 0x00;
+	/* send NCI CORE RESET CMD with Keep Config parameters */
+	ret = i2c_master_send(client, nci_reset_cmd, NCI_RESET_CMD_LEN);
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_send core reset Error\n", __func__);
+
+		if (gpio_is_valid(nqx_dev->firm_gpio)) {
+			gpio_set_value(nqx_dev->firm_gpio, 1);
+			usleep_range(10000, 10100);
+		}
+		gpio_set_value(nqx_dev->en_gpio, 0);
+		usleep_range(10000, 10100);
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		usleep_range(10000, 10100);
+
+		nci_get_version_cmd[0] = 0x00;
+		nci_get_version_cmd[1] = 0x04;
+		nci_get_version_cmd[2] = 0xF1;
+		nci_get_version_cmd[3] = 0x00;
+		nci_get_version_cmd[4] = 0x00;
+		nci_get_version_cmd[5] = 0x00;
+		nci_get_version_cmd[6] = 0x6E;
+		nci_get_version_cmd[7] = 0xEF;
+		ret = i2c_master_send(client, nci_get_version_cmd,
+						NCI_GET_VERSION_CMD_LEN);
+
+		if (ret < 0) {
+			dev_err(&client->dev,
+				"%s: - i2c_master_send get version cmd Error\n",
+				__func__);
+			goto err_nfcc_hw_check;
+		}
+		/* hardware dependent delay */
+		usleep_range(10000, 10100);
+
+		ret = i2c_master_recv(client, nci_get_version_rsp,
+						NCI_GET_VERSION_RSP_LEN);
+		if (ret < 0) {
+			dev_err(&client->dev,
+				"%s: - i2c_master_recv get version rsp Error\n",
+				__func__);
+			goto err_nfcc_hw_check;
+		} else {
+			nqx_dev->nqx_info.info.chip_type =
+				nci_get_version_rsp[3];
+			nqx_dev->nqx_info.info.rom_version =
+				nci_get_version_rsp[4];
+			nqx_dev->nqx_info.info.fw_minor =
+				nci_get_version_rsp[6];
+			nqx_dev->nqx_info.info.fw_major =
+				nci_get_version_rsp[7];
+		}
+		goto err_nfcc_reset_failed;
+	}
+	/* hardware dependent delay */
+	msleep(30);
+
+	/* Read Response of RESET command */
+	ret = i2c_master_recv(client, nci_reset_rsp, NCI_RESET_RSP_LEN);
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_recv Error\n", __func__);
+		gpio_retry_count = gpio_retry_count + 1;
+		if (gpio_retry_count < MAX_RETRY_COUNT)
+			goto reset_enable_gpio;
+		goto err_nfcc_hw_check;
+	}
+	nci_init_cmd[0] = 0x20;
+	nci_init_cmd[1] = 0x01;
+	nci_init_cmd[2] = 0x00;
+	ret = nqx_standby_write(nqx_dev, nci_init_cmd, NCI_INIT_CMD_LEN);
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_send failed for Core INIT\n", __func__);
+		goto err_nfcc_core_init_fail;
+	}
+	/* hardware dependent delay */
+	msleep(30);
+	/* Read Response of INIT command */
+	ret = i2c_master_recv(client, nci_init_rsp, NCI_INIT_RSP_LEN);
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_recv Error\n", __func__);
+		goto err_nfcc_core_init_fail;
+	}
+	init_rsp_len = 2 + nci_init_rsp[2]; /* payload + len */
+	if (init_rsp_len > PAYLOAD_HEADER_LENGTH) {
+		nqx_dev->nqx_info.info.chip_type =
+				nci_init_rsp[init_rsp_len - 3];
+		nqx_dev->nqx_info.info.rom_version =
+				nci_init_rsp[init_rsp_len - 2];
+		nqx_dev->nqx_info.info.fw_major =
+				nci_init_rsp[init_rsp_len - 1];
+		nqx_dev->nqx_info.info.fw_minor =
+				nci_init_rsp[init_rsp_len];
+	}
+	dev_dbg(&client->dev,
+		"%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+		__func__, nci_reset_rsp[0],
+		nci_reset_rsp[1], nci_reset_rsp[2]);
+
+err_nfcc_reset_failed:
+	dev_dbg(&nqx_dev->client->dev, "NQ NFCC chip_type = %x\n",
+		nqx_dev->nqx_info.info.chip_type);
+	dev_dbg(&nqx_dev->client->dev, "NQ fw version = %x.%x.%x\n",
+		nqx_dev->nqx_info.info.rom_version,
+		nqx_dev->nqx_info.info.fw_major,
+		nqx_dev->nqx_info.info.fw_minor);
+
+	switch (nqx_dev->nqx_info.info.chip_type) {
+	case NFCC_NQ_210:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ210 ##\n", __func__);
+		break;
+	case NFCC_NQ_220:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ220 ##\n", __func__);
+		break;
+	case NFCC_NQ_310:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ310 ##\n", __func__);
+		break;
+	case NFCC_NQ_330:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ330 ##\n", __func__);
+		break;
+	case NFCC_PN66T:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == PN66T ##\n", __func__);
+		break;
+	default:
+		dev_err(&client->dev,
+		"%s: - NFCC HW not Supported\n", __func__);
+		break;
+	}
+
+	/* Disable NFC by default to save power on boot */
+	gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
+	ret = 0;
+	goto done;
+
+err_nfcc_core_init_fail:
+	dev_err(&client->dev,
+	"%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+	__func__, nci_reset_rsp[0],
+	nci_reset_rsp[1], nci_reset_rsp[2]);
+
+err_nfcc_hw_check:
+	ret = -ENXIO;
+	dev_err(&client->dev,
+		"%s: - NFCC HW not available\n", __func__);
+
+done:
+	kfree(nci_reset_rsp);
+	kfree(nci_init_rsp);
+	kfree(nci_init_cmd);
+	kfree(nci_reset_cmd);
+	kfree(nci_get_version_cmd);
+	kfree(nci_get_version_rsp);
+
+	return ret;
+}
+
+/*
+ * Routine to enable the clock.
+ * This routine can be extended to select from multiple
+ * sources based on clk_src_name.
+ */
+static int nqx_clock_select(struct nqx_dev *nqx_dev)
+{
+	int r = 0;
+
+	nqx_dev->s_clk = clk_get(&nqx_dev->client->dev, "ref_clk");
+
+	/* clk_get() returns an ERR_PTR on failure, not NULL */
+	if (IS_ERR_OR_NULL(nqx_dev->s_clk))
+		goto err_clk;
+
+	if (!nqx_dev->clk_run)
+		r = clk_prepare_enable(nqx_dev->s_clk);
+
+	if (r)
+		goto err_clk;
+
+	nqx_dev->clk_run = true;
+
+	return r;
+
+err_clk:
+	r = -1;
+	return r;
+}
+
+/*
+ * Routine to disable clocks
+ */
+static int nqx_clock_deselect(struct nqx_dev *nqx_dev)
+{
+	int r = -1;
+
+	if (nqx_dev->s_clk != NULL) {
+		if (nqx_dev->clk_run) {
+			clk_disable_unprepare(nqx_dev->s_clk);
+			nqx_dev->clk_run = false;
+		}
+		return 0;
+	}
+	return r;
+}
+
+static int nfc_parse_dt(struct device *dev, struct nqx_platform_data *pdata)
+{
+	int r = 0;
+	struct device_node *np = dev->of_node;
+
+	pdata->en_gpio = of_get_named_gpio(np, "qcom,nq-ven", 0);
+	if ((!gpio_is_valid(pdata->en_gpio)))
+		return -EINVAL;
+	disable_ctrl = pdata->en_gpio;
+
+	pdata->irq_gpio = of_get_named_gpio(np, "qcom,nq-irq", 0);
+	if ((!gpio_is_valid(pdata->irq_gpio)))
+		return -EINVAL;
+
+	pdata->firm_gpio = of_get_named_gpio(np, "qcom,nq-firm", 0);
+	if (!gpio_is_valid(pdata->firm_gpio)) {
+		dev_warn(dev,
+			"FIRM GPIO <OPTIONAL> error getting from OF node\n");
+		pdata->firm_gpio = -EINVAL;
+	}
+
+	pdata->ese_gpio = of_get_named_gpio(np, "qcom,nq-esepwr", 0);
+	if (!gpio_is_valid(pdata->ese_gpio)) {
+		dev_warn(dev,
+			"ese GPIO <OPTIONAL> error getting from OF node\n");
+		pdata->ese_gpio = -EINVAL;
+	}
+
+	if (of_property_read_string(np, "qcom,clk-src", &pdata->clk_src_name))
+		pdata->clk_pin_voting = false;
+	else
+		pdata->clk_pin_voting = true;
+
+	pdata->clkreq_gpio = of_get_named_gpio(np, "qcom,nq-clkreq", 0);
+
+	return r;
+}
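nfc_parse_dt() above expects a devicetree node along these lines; this is a hedged sketch only, with the I2C address, GPIO numbers, and clock name used as board-specific placeholders:

	nq@28 {
		compatible = "qcom,nq-nci";
		reg = <0x28>;
		qcom,nq-irq = <&tlmm 37 0x00>;
		qcom,nq-ven = <&tlmm 12 0x00>;
		qcom,nq-firm = <&tlmm 62 0x00>;
		qcom,nq-esepwr = <&tlmm 63 0x00>;
		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
		qcom,clk-src = "BBCLK3";
	};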
+
+static inline int gpio_input_init(const struct device * const dev,
+			const int gpio, const char * const gpio_name)
+{
+	int r = gpio_request(gpio, gpio_name);
+
+	if (r) {
+		dev_err(dev, "unable to request gpio [%d]\n", gpio);
+		return r;
+	}
+
+	r = gpio_direction_input(gpio);
+	if (r)
+		dev_err(dev, "unable to set direction for gpio [%d]\n", gpio);
+
+	return r;
+}
+
+static int nqx_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int r = 0;
+	int irqn = 0;
+	struct nqx_platform_data *platform_data;
+	struct nqx_dev *nqx_dev;
+
+	dev_dbg(&client->dev, "%s: enter\n", __func__);
+	if (client->dev.of_node) {
+		platform_data = devm_kzalloc(&client->dev,
+			sizeof(struct nqx_platform_data), GFP_KERNEL);
+		if (!platform_data) {
+			r = -ENOMEM;
+			goto err_platform_data;
+		}
+		r = nfc_parse_dt(&client->dev, platform_data);
+		if (r)
+			goto err_free_data;
+	} else {
+		platform_data = client->dev.platform_data;
+	}
+
+	dev_dbg(&client->dev,
+		"%s, inside nfc-nci flags = %x\n",
+		__func__, client->flags);
+
+	if (platform_data == NULL) {
+		dev_err(&client->dev, "%s: failed\n", __func__);
+		r = -ENODEV;
+		goto err_platform_data;
+	}
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "%s: need I2C_FUNC_I2C\n", __func__);
+		r = -ENODEV;
+		goto err_free_data;
+	}
+	nqx_dev = kzalloc(sizeof(*nqx_dev), GFP_KERNEL);
+	if (nqx_dev == NULL) {
+		r = -ENOMEM;
+		goto err_free_data;
+	}
+	nqx_dev->client = client;
+	nqx_dev->kbuflen = MAX_BUFFER_SIZE;
+	nqx_dev->kbuf = kzalloc(MAX_BUFFER_SIZE, GFP_KERNEL);
+	if (!nqx_dev->kbuf) {
+		dev_err(&client->dev,
+			"failed to allocate memory for nqx_dev->kbuf\n");
+		r = -ENOMEM;
+		goto err_free_dev;
+	}
+
+	if (gpio_is_valid(platform_data->en_gpio)) {
+		r = gpio_request(platform_data->en_gpio, "nfc_reset_gpio");
+		if (r) {
+			dev_err(&client->dev,
+			"%s: unable to request nfc reset gpio [%d]\n",
+				__func__,
+				platform_data->en_gpio);
+			goto err_mem;
+		}
+		r = gpio_direction_output(platform_data->en_gpio, 0);
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to set direction for nfc reset gpio [%d]\n",
+					__func__,
+					platform_data->en_gpio);
+			goto err_en_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+		"%s: nfc reset gpio not provided\n", __func__);
+		r = -ENODEV;
+		goto err_mem;
+	}
+
+	if (gpio_is_valid(platform_data->irq_gpio)) {
+		r = gpio_request(platform_data->irq_gpio, "nfc_irq_gpio");
+		if (r) {
+			dev_err(&client->dev, "%s: unable to request nfc irq gpio [%d]\n",
+				__func__, platform_data->irq_gpio);
+			goto err_en_gpio;
+		}
+		r = gpio_direction_input(platform_data->irq_gpio);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: unable to set direction for nfc irq gpio [%d]\n",
+				__func__,
+				platform_data->irq_gpio);
+			goto err_irq_gpio;
+		}
+		irqn = gpio_to_irq(platform_data->irq_gpio);
+		if (irqn < 0) {
+			r = irqn;
+			goto err_irq_gpio;
+		}
+		client->irq = irqn;
+	} else {
+		dev_err(&client->dev, "%s: irq gpio not provided\n", __func__);
+		r = -ENODEV;
+		goto err_en_gpio;
+	}
+	if (gpio_is_valid(platform_data->firm_gpio)) {
+		r = gpio_request(platform_data->firm_gpio,
+			"nfc_firm_gpio");
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to request nfc firmware gpio [%d]\n",
+				__func__, platform_data->firm_gpio);
+			goto err_irq_gpio;
+		}
+		r = gpio_direction_output(platform_data->firm_gpio, 0);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: cannot set direction for nfc firmware gpio [%d]\n",
+			__func__, platform_data->firm_gpio);
+			goto err_firm_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+			"%s: firm gpio not provided\n", __func__);
+		r = -ENODEV;
+		goto err_irq_gpio;
+	}
+	if (gpio_is_valid(platform_data->ese_gpio)) {
+		r = gpio_request(platform_data->ese_gpio,
+				"nfc-ese_pwr");
+		if (r) {
+			nqx_dev->ese_gpio = -EINVAL;
+			dev_err(&client->dev,
+				"%s: unable to request nfc ese gpio [%d]\n",
+					__func__, platform_data->ese_gpio);
+			/* ese gpio optional so we should continue */
+		} else {
+			nqx_dev->ese_gpio = platform_data->ese_gpio;
+			r = gpio_direction_output(platform_data->ese_gpio, 0);
+			if (r) {
+				/*
+				 * free ese gpio and set invalid
+				 * to avoid further use
+				 */
+				gpio_free(platform_data->ese_gpio);
+				nqx_dev->ese_gpio = -EINVAL;
+				dev_err(&client->dev,
+					"%s: cannot set direction for nfc ese gpio [%d]\n",
+					__func__, platform_data->ese_gpio);
+				/* ese gpio optional so we should continue */
+			}
+		}
+	} else {
+		nqx_dev->ese_gpio = -EINVAL;
+		dev_err(&client->dev,
+			"%s: ese gpio not provided\n", __func__);
+		/* ese gpio optional so we should continue */
+	}
+	if (gpio_is_valid(platform_data->clkreq_gpio)) {
+		r = gpio_request(platform_data->clkreq_gpio,
+			"nfc_clkreq_gpio");
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to request nfc clkreq gpio [%d]\n",
+				__func__, platform_data->clkreq_gpio);
+			goto err_ese_gpio;
+		}
+		r = gpio_direction_input(platform_data->clkreq_gpio);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: cannot set direction for nfc clkreq gpio [%d]\n",
+			__func__, platform_data->clkreq_gpio);
+			goto err_clkreq_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+			"%s: clkreq gpio not provided\n", __func__);
+		r = -ENODEV;
+		goto err_ese_gpio;
+	}
+
+	nqx_dev->en_gpio = platform_data->en_gpio;
+	nqx_dev->irq_gpio = platform_data->irq_gpio;
+	nqx_dev->firm_gpio  = platform_data->firm_gpio;
+	nqx_dev->clkreq_gpio = platform_data->clkreq_gpio;
+	nqx_dev->pdata = platform_data;
+
+	/* init mutex and queues */
+	init_waitqueue_head(&nqx_dev->read_wq);
+	mutex_init(&nqx_dev->read_mutex);
+	spin_lock_init(&nqx_dev->irq_enabled_lock);
+
+	r = alloc_chrdev_region(&nqx_dev->devno, 0, DEV_COUNT, DEVICE_NAME);
+	if (r < 0) {
+		dev_err(&client->dev,
+			"%s: failed to alloc chrdev region\n", __func__);
+		goto err_char_dev_register;
+	}
+
+	nqx_dev->nqx_class = class_create(THIS_MODULE, CLASS_NAME);
+	if (IS_ERR(nqx_dev->nqx_class)) {
+		dev_err(&client->dev,
+			"%s: failed to register device class\n", __func__);
+		r = PTR_ERR(nqx_dev->nqx_class);
+		goto err_class_create;
+	}
+
+	cdev_init(&nqx_dev->c_dev, &nfc_dev_fops);
+	r = cdev_add(&nqx_dev->c_dev, nqx_dev->devno, DEV_COUNT);
+	if (r < 0) {
+		dev_err(&client->dev, "%s: failed to add cdev\n", __func__);
+		goto err_cdev_add;
+	}
+
+	nqx_dev->nqx_device = device_create(nqx_dev->nqx_class, NULL,
+					nqx_dev->devno, nqx_dev, DEVICE_NAME);
+	if (IS_ERR(nqx_dev->nqx_device)) {
+		dev_err(&client->dev,
+			"%s: failed to create the device\n", __func__);
+		r = PTR_ERR(nqx_dev->nqx_device);
+		goto err_device_create;
+	}
+
+	/* NFC_INT IRQ */
+	nqx_dev->irq_enabled = true;
+	r = request_irq(client->irq, nqx_dev_irq_handler,
+			  IRQF_TRIGGER_HIGH, client->name, nqx_dev);
+	if (r) {
+		dev_err(&client->dev, "%s: request_irq failed\n", __func__);
+		goto err_request_irq_failed;
+	}
+	nqx_disable_irq(nqx_dev);
+
+	/*
+	 * To be efficient, we need to test whether the NFCC hardware is
+	 * physically present before attempting further hardware
+	 * initialisation.
+	 */
+	r = nfcc_hw_check(client, nqx_dev);
+	if (r) {
+		/* make sure NFCC is not enabled */
+		gpio_set_value(platform_data->en_gpio, 0);
+		/* We don't think there is a hardware switch to turn NFC off */
+		goto err_request_hw_check_failed;
+	}
+
+	/* Register reboot notifier here */
+	r = register_reboot_notifier(&nfcc_notifier);
+	if (r) {
+		dev_err(&client->dev,
+			"%s: cannot register reboot notifier(err = %d)\n",
+			__func__, r);
+		/*
+		 * nfcc_hw_check() does not allocate memory,
+		 * so the same goto target is used here
+		 */
+		goto err_request_hw_check_failed;
+	}
+
+#ifdef NFC_KERNEL_BU
+	r = nqx_clock_select(nqx_dev);
+	if (r < 0) {
+		dev_err(&client->dev,
+			"%s: nqx_clock_select failed\n", __func__);
+		goto err_clock_en_failed;
+	}
+	gpio_set_value(platform_data->en_gpio, 1);
+#endif
+	device_init_wakeup(&client->dev, true);
+	device_set_wakeup_capable(&client->dev, true);
+	i2c_set_clientdata(client, nqx_dev);
+	nqx_dev->irq_wake_up = false;
+
+	dev_err(&client->dev,
+	"%s: probing NFCC NQxxx exited successfully\n",
+		 __func__);
+	return 0;
+
+#ifdef NFC_KERNEL_BU
+err_clock_en_failed:
+	unregister_reboot_notifier(&nfcc_notifier);
+#endif
+err_request_hw_check_failed:
+	free_irq(client->irq, nqx_dev);
+err_request_irq_failed:
+	device_destroy(nqx_dev->nqx_class, nqx_dev->devno);
+err_device_create:
+	cdev_del(&nqx_dev->c_dev);
+err_cdev_add:
+	class_destroy(nqx_dev->nqx_class);
+err_class_create:
+	unregister_chrdev_region(nqx_dev->devno, DEV_COUNT);
+err_char_dev_register:
+	mutex_destroy(&nqx_dev->read_mutex);
+err_clkreq_gpio:
+	gpio_free(platform_data->clkreq_gpio);
+err_ese_gpio:
+	/* optional gpio, may not have been configured in probe */
+	if (nqx_dev->ese_gpio > 0)
+		gpio_free(platform_data->ese_gpio);
+err_firm_gpio:
+	gpio_free(platform_data->firm_gpio);
+err_irq_gpio:
+	gpio_free(platform_data->irq_gpio);
+err_en_gpio:
+	gpio_free(platform_data->en_gpio);
+err_mem:
+	kfree(nqx_dev->kbuf);
+err_free_dev:
+	kfree(nqx_dev);
+err_free_data:
+	if (client->dev.of_node)
+		devm_kfree(&client->dev, platform_data);
+err_platform_data:
+	dev_err(&client->dev,
+	"%s: probing nqxx failed, check hardware\n",
+		 __func__);
+	return r;
+}
+
+static int nqx_remove(struct i2c_client *client)
+{
+	int ret = 0;
+	struct nqx_dev *nqx_dev;
+
+	nqx_dev = i2c_get_clientdata(client);
+	if (!nqx_dev) {
+		dev_err(&client->dev,
+		"%s: device doesn't exist anymore\n", __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	unregister_reboot_notifier(&nfcc_notifier);
+	free_irq(client->irq, nqx_dev);
+	cdev_del(&nqx_dev->c_dev);
+	device_destroy(nqx_dev->nqx_class, nqx_dev->devno);
+	class_destroy(nqx_dev->nqx_class);
+	unregister_chrdev_region(nqx_dev->devno, DEV_COUNT);
+	mutex_destroy(&nqx_dev->read_mutex);
+	gpio_free(nqx_dev->clkreq_gpio);
+	/* optional gpio, may not have been configured in probe */
+	if (nqx_dev->ese_gpio > 0)
+		gpio_free(nqx_dev->ese_gpio);
+	gpio_free(nqx_dev->firm_gpio);
+	gpio_free(nqx_dev->irq_gpio);
+	gpio_free(nqx_dev->en_gpio);
+	kfree(nqx_dev->kbuf);
+	if (client->dev.of_node)
+		devm_kfree(&client->dev, nqx_dev->pdata);
+
+	kfree(nqx_dev);
+err:
+	return ret;
+}
+
+static int nqx_suspend(struct device *device)
+{
+	struct i2c_client *client = to_i2c_client(device);
+	struct nqx_dev *nqx_dev = i2c_get_clientdata(client);
+
+	if (device_may_wakeup(&client->dev) && nqx_dev->irq_enabled) {
+		if (!enable_irq_wake(client->irq))
+			nqx_dev->irq_wake_up = true;
+	}
+	return 0;
+}
+
+static int nqx_resume(struct device *device)
+{
+	struct i2c_client *client = to_i2c_client(device);
+	struct nqx_dev *nqx_dev = i2c_get_clientdata(client);
+
+	if (device_may_wakeup(&client->dev) && nqx_dev->irq_wake_up) {
+		if (!disable_irq_wake(client->irq))
+			nqx_dev->irq_wake_up = false;
+	}
+	return 0;
+}
+
+static const struct i2c_device_id nqx_id[] = {
+	{"nqx-i2c", 0},
+	{}
+};
+
+static const struct dev_pm_ops nfc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(nqx_suspend, nqx_resume)
+};
+
+static struct i2c_driver nqx = {
+	.id_table = nqx_id,
+	.probe = nqx_probe,
+	.remove = nqx_remove,
+	.driver = {
+		.name = "nq-nci",
+		.of_match_table = msm_match_table,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+		.pm = &nfc_pm_ops,
+	},
+};
+
+static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
+			  void *v)
+{
+	gpio_set_value(disable_ctrl, 1);
+	return NOTIFY_OK;
+}
+
+/*
+ * module load/unload record keeping
+ */
+static int __init nqx_dev_init(void)
+{
+	return i2c_add_driver(&nqx);
+}
+module_init(nqx_dev_init);
+
+static void __exit nqx_dev_exit(void)
+{
+	unregister_reboot_notifier(&nfcc_notifier);
+	i2c_del_driver(&nqx);
+}
+module_exit(nqx_dev_exit);
+
+MODULE_DESCRIPTION("NFC nqx");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nfc/nq-nci.h b/drivers/nfc/nq-nci.h
new file mode 100644
index 0000000..515547c
--- /dev/null
+++ b/drivers/nfc/nq-nci.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __NQ_NCI_H
+#define __NQ_NCI_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/nfcinfo.h>
+
+#define NFC_SET_PWR			_IOW(0xE9, 0x01, unsigned int)
+#define ESE_SET_PWR			_IOW(0xE9, 0x02, unsigned int)
+#define ESE_GET_PWR			_IOR(0xE9, 0x03, unsigned int)
+#define SET_RX_BLOCK			_IOW(0xE9, 0x04, unsigned int)
+#define SET_EMULATOR_TEST_POINT		_IOW(0xE9, 0x05, unsigned int)
+#define NFCC_INITIAL_CORE_RESET_NTF	_IOW(0xE9, 0x10, unsigned int)
+
+#define NFC_RX_BUFFER_CNT_START		(0x0)
+#define PAYLOAD_HEADER_LENGTH		(0x3)
+#define PAYLOAD_LENGTH_MAX		(256)
+#define BYTE				(0x8)
+#define NCI_IDENTIFIER			(0x10)
+
+enum nfcc_initial_core_reset_ntf {
+	TIMEDOUT_INITIAL_CORE_RESET_NTF = 0, /* 0 */
+	ARRIVED_INITIAL_CORE_RESET_NTF, /* 1 */
+	DEFAULT_INITIAL_CORE_RESET_NTF, /* 2 */
+};
+
+enum nfcc_chip_variant {
+	NFCC_NQ_210			= 0x48,	/**< NFCC NQ210 */
+	NFCC_NQ_220			= 0x58,	/**< NFCC NQ220 */
+	NFCC_NQ_310			= 0x40,	/**< NFCC NQ310 */
+	NFCC_NQ_330			= 0x51,	/**< NFCC NQ330 */
+	NFCC_PN66T			= 0x18,	/**< NFCC PN66T */
+	NFCC_NOT_SUPPORTED	        = 0xFF	/**< NFCC is not supported */
+};
+#endif
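The ioctls above are issued from user space against the character device registered by nq-nci.c. A minimal user-space sketch, assuming the node is exposed as /dev/nq-nci (derived from DEVICE_NAME) and with most error handling elided:

#include <fcntl.h>
#include <sys/ioctl.h>

static int nfc_power_on(void)
{
	int fd = open("/dev/nq-nci", O_RDWR);

	if (fd < 0)
		return -1;
	ioctl(fd, NFC_SET_PWR, 0);	/* NFC_ENABLE low, FW_DL low */
	ioctl(fd, NFC_SET_PWR, 1);	/* NFC_ENABLE high: normal mode */
	return fd;
}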
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 322370a..af5314e 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -4128,6 +4128,94 @@
 }
 EXPORT_SYMBOL(gsi_map_virtual_ch_to_per_ep);
 
+void gsi_wdi3_write_evt_ring_db(unsigned long evt_ring_hdl,
+	uint32_t db_addr_low, uint32_t db_addr_high)
+{
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return;
+	}
+
+	gsi_writel(db_addr_low, gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_ring_hdl, gsi_ctx->per.ee));
+
+	gsi_writel(db_addr_high, gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_ring_hdl, gsi_ctx->per.ee));
+}
+EXPORT_SYMBOL(gsi_wdi3_write_evt_ring_db);
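A hedged usage sketch: a caller would typically split a 64-bit doorbell IOVA across the two 32-bit register halves written above (evt_ring_hdl and db_iova are placeholders):

	gsi_wdi3_write_evt_ring_db(evt_ring_hdl,
				   lower_32_bits(db_iova),
				   upper_32_bits(db_iova));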
+
+void gsi_wdi3_dump_register(unsigned long chan_hdl)
+{
+	uint32_t val;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return;
+	}
+	GSIDBG("reg dump ch id %lu\n", chan_hdl);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_QOS_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_QOS_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS 0x%x\n", val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	GSIDBG("GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS 0x%x\n", val);
+}
+EXPORT_SYMBOL(gsi_wdi3_dump_register);
+
 static int msm_gsi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 427c177..140e3b6 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -117,7 +117,7 @@
 	__stringify(IPA_CLIENT_HSIC5_CONS),
 	__stringify(IPA_CLIENT_WLAN1_PROD),
 	__stringify(IPA_CLIENT_WLAN1_CONS),
-	__stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD),
+	__stringify(IPA_CLIENT_WLAN2_PROD),
 	__stringify(IPA_CLIENT_WLAN2_CONS),
 	__stringify(RESERVED_PROD_14),
 	__stringify(IPA_CLIENT_WLAN3_CONS),
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
index c03041f..906b56e 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/ipa_wdi3.h>
@@ -8,6 +8,7 @@
 #include <linux/string.h>
 #include "../ipa_common_i.h"
 #include "../ipa_v3/ipa_pm.h"
+#include "../ipa_v3/ipa_i.h"
 
 #define OFFLOAD_DRV_NAME "ipa_wdi"
 #define IPA_WDI_DBG(fmt, args...) \
@@ -113,6 +114,10 @@
 
 	ipa_wdi_ctx->is_smmu_enabled = out->is_smmu_enabled;
 
+	out->is_over_gsi = ipa3_ctx->ipa_wdi3_over_gsi;
+
 	return 0;
 }
 EXPORT_SYMBOL(ipa_wdi_init);
@@ -243,14 +248,20 @@
 
 	memset(tx_prop, 0, sizeof(tx_prop));
 	tx_prop[0].ip = IPA_IP_v4;
-	tx_prop[0].dst_pipe = IPA_CLIENT_WLAN1_CONS;
+	if (!ipa3_ctx->ipa_wdi3_over_gsi)
+		tx_prop[0].dst_pipe = IPA_CLIENT_WLAN1_CONS;
+	else
+		tx_prop[0].dst_pipe = IPA_CLIENT_WLAN2_CONS;
 	tx_prop[0].alt_dst_pipe = in->alt_dst_pipe;
 	tx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type;
 	strlcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
 		sizeof(tx_prop[0].hdr_name));
 
 	tx_prop[1].ip = IPA_IP_v6;
-	tx_prop[1].dst_pipe = IPA_CLIENT_WLAN1_CONS;
+	if (!ipa3_ctx->ipa_wdi3_over_gsi)
+		tx_prop[1].dst_pipe = IPA_CLIENT_WLAN1_CONS;
+	else
+		tx_prop[1].dst_pipe = IPA_CLIENT_WLAN2_CONS;
 	tx_prop[1].alt_dst_pipe = in->alt_dst_pipe;
 	tx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type;
 	strlcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
@@ -259,10 +270,12 @@
 	/* populate rx prop */
 	rx.num_props = 2;
 	rx.prop = rx_prop;
-
 	memset(rx_prop, 0, sizeof(rx_prop));
 	rx_prop[0].ip = IPA_IP_v4;
-	rx_prop[0].src_pipe = IPA_CLIENT_WLAN1_PROD;
+	if (!ipa3_ctx->ipa_wdi3_over_gsi)
+		rx_prop[0].src_pipe = IPA_CLIENT_WLAN1_PROD;
+	else
+		rx_prop[0].src_pipe = IPA_CLIENT_WLAN2_PROD;
 	rx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type;
 	if (in->is_meta_data_valid) {
 		rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
@@ -271,7 +284,10 @@
 	}
 
 	rx_prop[1].ip = IPA_IP_v6;
-	rx_prop[1].src_pipe = IPA_CLIENT_WLAN1_PROD;
+	if (!ipa3_ctx->ipa_wdi3_over_gsi)
+		rx_prop[1].src_pipe = IPA_CLIENT_WLAN1_PROD;
+	else
+		rx_prop[1].src_pipe = IPA_CLIENT_WLAN2_PROD;
 	rx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type;
 	if (in->is_meta_data_valid) {
 		rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
@@ -649,8 +665,13 @@
 		}
 	}
 
-	ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
-	ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+	} else {
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
+	}
 
 	if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
 		if (ipa_disconn_wdi_pipes(ipa_ep_idx_rx, ipa_ep_idx_tx)) {
@@ -705,8 +726,13 @@
 		return -EPERM;
 	}
 
-	ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
-	ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+	} else {
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
+	}
 
 	if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
 		if (ipa_enable_wdi_pipes(ipa_ep_idx_tx, ipa_ep_idx_rx)) {
@@ -766,8 +792,13 @@
 		return -EPERM;
 	}
 
-	ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
-	ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+	} else {
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
+	}
 
 	if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
 		if (ipa_disable_wdi_pipes(ipa_ep_idx_tx, ipa_ep_idx_rx)) {
@@ -825,9 +856,11 @@
 		rm_profile.max_supported_bandwidth_mbps =
 			profile->max_supported_bw_mbps;
 
-		if (profile->client == IPA_CLIENT_WLAN1_PROD) {
+		if (profile->client == IPA_CLIENT_WLAN1_PROD ||
+			profile->client == IPA_CLIENT_WLAN2_PROD) {
 			resource_name = IPA_RM_RESOURCE_WLAN_PROD;
-		} else if (profile->client == IPA_CLIENT_WLAN1_CONS) {
+		} else if (profile->client == IPA_CLIENT_WLAN1_CONS ||
+				   profile->client == IPA_CLIENT_WLAN2_CONS) {
 			resource_name = IPA_RM_RESOURCE_WLAN_CONS;
 		} else {
 			IPA_WDI_ERR("not supported\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index b27ea05..208ff10 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -5219,6 +5219,7 @@
 	ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
 	ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
 	ipa3_ctx->ipa_wdi2_over_gsi = resource_p->ipa_wdi2_over_gsi;
+	ipa3_ctx->ipa_wdi3_over_gsi = resource_p->ipa_wdi3_over_gsi;
 	ipa3_ctx->ipa_fltrt_not_hashable = resource_p->ipa_fltrt_not_hashable;
 	ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
 	ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
@@ -5804,6 +5805,7 @@
 	ipa_drv_res->modem_cfg_emb_pipe_flt = false;
 	ipa_drv_res->ipa_wdi2 = false;
 	ipa_drv_res->ipa_wdi2_over_gsi = false;
+	ipa_drv_res->ipa_wdi3_over_gsi = false;
 	ipa_drv_res->ipa_mhi_dynamic_config = false;
 	ipa_drv_res->use_64_bit_dma_mask = false;
 	ipa_drv_res->use_bw_vote = false;
@@ -5902,6 +5904,13 @@
 			ipa_drv_res->ipa_wdi2_over_gsi
 			? "True" : "False");
 
+	ipa_drv_res->ipa_wdi3_over_gsi =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-wdi3-over-gsi");
+	IPADBG(": WDI-3.0 over gsi= %s\n",
+			ipa_drv_res->ipa_wdi3_over_gsi
+			? "True" : "False");
+
 	ipa_drv_res->ipa_wdi2 =
 			of_property_read_bool(pdev->dev.of_node,
 			"qcom,ipa-wdi2");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 777dcb3..7eb6421 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1600,6 +1600,7 @@
 	bool modem_cfg_emb_pipe_flt;
 	bool ipa_wdi2;
 	bool ipa_wdi2_over_gsi;
+	bool ipa_wdi3_over_gsi;
 	bool ipa_fltrt_not_hashable;
 	bool use_64_bit_dma_mask;
 	/* featurize if memory footprint becomes a concern */
@@ -1689,6 +1690,7 @@
 	bool modem_cfg_emb_pipe_flt;
 	bool ipa_wdi2;
 	bool ipa_wdi2_over_gsi;
+	bool ipa_wdi3_over_gsi;
 	bool ipa_fltrt_not_hashable;
 	bool use_64_bit_dma_mask;
 	bool use_bw_vote;
@@ -2270,6 +2272,10 @@
 		phys_addr_t pa, struct sg_table *sgt, size_t len, bool device,
 		unsigned long *iova);
 
+int ipa_create_gsi_smmu_mapping(int res_idx, bool wlan_smmu_en,
+		phys_addr_t pa, struct sg_table *sgt, size_t len, bool device,
+		unsigned long *iova);
+
 /*
  * Tethering bridge (Rmnet / MBIM)
  */
@@ -2507,6 +2513,7 @@
 int ipa3_wdi_init(void);
 int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id);
 int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id);
+int ipa3_write_qmapid_wdi3_gsi_pipe(u32 clnt_hdl, u8 qmap_id);
 int ipa3_tag_process(struct ipa3_desc *desc, int num_descs,
 		    unsigned long timeout);
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
index a7d3ff9..85a2016 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
@@ -164,6 +164,7 @@
 	struct ipa_mhi_ready_indication_msg_v01 ready_ind;
 	struct ipa_mhi_alloc_channel_req_msg_v01 alloc_ch_req;
 	struct ipa_mhi_alloc_channel_resp_msg_v01 alloc_ch_resp;
+	struct ipa_mhi_clk_vote_resp_msg_v01 clk_vote_resp;
 };
 
 struct imp_mhi_driver {
@@ -380,7 +381,12 @@
 			ridx++;
 			resp->alloc_resp_arr_len = ridx;
 			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
-			resp->resp.error = IPA_QMI_ERR_INVALID_ID_V01;
+			/* return INCOMPATIBLE_STATE if MHI is not active */
+			if (mhi_is_active(imp_ctx->md.mhi_dev))
+				resp->resp.error = IPA_QMI_ERR_INVALID_ID_V01;
+			else
+				resp->resp.error =
+					IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
 			return -EINVAL;
 		}
 
@@ -436,6 +442,7 @@
 		IMP_DBG("Configuring MHI device for ch %d\n", ch->props.id);
 		ret = mhi_device_configure(imp_ctx->md.mhi_dev, ch->props.dir,
 			ch_config, 2);
+		/* this only configures the MHI host; no need to check the MHI state */
 		if (ret) {
 			IMP_ERR("mhi_device_configure failed for ch %d\n",
 				req->tr_info_arr[i].ch_id);
@@ -478,7 +485,7 @@
 	if (imp_ctx->state != IMP_READY) {
 		IMP_ERR("invalid state %d\n", imp_ctx->state);
 		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
-		resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
+		resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
 		mutex_unlock(&imp_ctx->mutex);
 		return resp;
 	}
@@ -542,7 +549,11 @@
 			.is_success = 0;
 		resp->alloc_resp_arr_len++;
 		resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
-		resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
+		/* return INCOMPATIBLE_STATE if MHI is not active */
+		if (mhi_is_active(imp_ctx->md.mhi_dev))
+			resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
+		else
+			resp->resp.error = IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
 		goto fail_smmu;
 	}
 
@@ -592,23 +603,29 @@
  *
- * Return: 0 on success, negative otherwise
+ * Return: the clock-vote QMI response message, with its result and
+ * error fields populated
  */
-int imp_handle_vote_req(bool vote)
+struct ipa_mhi_clk_vote_resp_msg_v01
+	*imp_handle_vote_req(bool vote)
 {
 	int ret;
+	struct ipa_mhi_clk_vote_resp_msg_v01 *resp =
+		&imp_ctx->qmi.clk_vote_resp;
 
 	IMP_DBG_LOW("vote %d\n", vote);
+	memset(resp, 0, sizeof(struct ipa_mhi_clk_vote_resp_msg_v01));
+	resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
+	resp->resp.error = IPA_QMI_ERR_INTERNAL_V01;
 
 	mutex_lock(&imp_ctx->mutex);
 	if (imp_ctx->state != IMP_STARTED) {
 		IMP_ERR("unexpected vote when in state %d\n", imp_ctx->state);
 		mutex_unlock(&imp_ctx->mutex);
-		return -EPERM;
+		return resp;
 	}
 
 	if (vote == imp_ctx->lpm_disabled) {
 		IMP_ERR("already voted/devoted %d\n", vote);
 		mutex_unlock(&imp_ctx->mutex);
-		return -EPERM;
+		return resp;
 	}
 	mutex_unlock(&imp_ctx->mutex);
 
@@ -622,7 +639,14 @@
 		ret = mhi_device_get_sync(imp_ctx->md.mhi_dev);
 		if (ret) {
 			IMP_ERR("mhi_sync_get failed %d\n", ret);
-			return ret;
+			resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
+			/* return INCOMPATIBLE_STATE if MHI is not active */
+			if (mhi_is_active(imp_ctx->md.mhi_dev))
+				resp->resp.error = IPA_QMI_ERR_INVALID_ID_V01;
+			else
+				resp->resp.error =
+					IPA_QMI_ERR_INCOMPATIBLE_STATE_V01;
+			return resp;
 		}
 	} else {
 		mhi_device_put(imp_ctx->md.mhi_dev);
@@ -635,7 +659,8 @@
 		imp_ctx->lpm_disabled = false;
 	mutex_unlock(&imp_ctx->mutex);
 
-	return 0;
+	resp->resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	return resp;
 }
 
 static int imp_read_iova_from_dtsi(const char *node, struct imp_iova_addr *out)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.h b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.h
index 201d685..ba19d97 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.h
@@ -15,7 +15,7 @@
 struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req(
 	struct ipa_mhi_alloc_channel_req_msg_v01 *req);
 
-int imp_handle_vote_req(bool vote);
+struct ipa_mhi_clk_vote_resp_msg_v01 *imp_handle_vote_req(bool vote);
 
 void imp_handle_modem_shutdown(void);
 
@@ -33,9 +33,10 @@
 		return NULL;
 }
 
-static inline int imp_handle_vote_req(bool vote)
+static inline struct ipa_mhi_clk_vote_resp_msg_v01
+	*imp_handle_vote_req(bool vote)
 {
-	return -EPERM;
+	return NULL;
 }
 
 static inline  void imp_handle_modem_shutdown(void)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index fd70613..be99fb7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -285,25 +285,19 @@
 	const void *decoded_msg)
 {
 	struct ipa_mhi_clk_vote_req_msg_v01 *vote_req;
-	struct ipa_mhi_clk_vote_resp_msg_v01 resp;
+	struct ipa_mhi_clk_vote_resp_msg_v01 *resp;
 	int rc;
 
 	vote_req = (struct ipa_mhi_clk_vote_req_msg_v01 *)decoded_msg;
 	IPAWANDBG("Received QMI_IPA_MHI_CLK_VOTE_REQ_V01(%d)\n",
 		vote_req->mhi_vote);
-	rc = imp_handle_vote_req(vote_req->mhi_vote);
-	if (rc) {
-		resp.resp.result = IPA_QMI_RESULT_FAILURE_V01;
-		resp.resp.error = IPA_QMI_ERR_INTERNAL_V01;
-	} else {
-		resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
-	}
+	resp = imp_handle_vote_req(vote_req->mhi_vote);
 	IPAWANDBG("start sending QMI_IPA_MHI_CLK_VOTE_RESP_V01\n");
 	rc = qmi_send_response(qmi_handle, sq, txn,
 		QMI_IPA_MHI_CLK_VOTE_RESP_V01,
 		IPA_MHI_CLK_VOTE_RESP_MSG_V01_MAX_MSG_LEN,
 		ipa_mhi_clk_vote_resp_msg_v01_ei,
-		&resp);
+		resp);
 
 	if (rc < 0)
 		IPAWANERR("QMI_IPA_MHI_CLK_VOTE_RESP_V01 failed\n");
@@ -352,7 +346,7 @@
 	    ipa3_rmnet_ctx.ipa_rmnet_ssr) {
 		IPAWANERR(
 		"Got bad response %d from request id %d (error %d)\n",
-		req_id, result, error);
+		result, req_id, error);
 		return result;
 	}
 	IPAWANDBG_LOW("Received %s successfully\n", resp_type);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index e552446..a5391a9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "ipa_i.h"
@@ -733,7 +733,7 @@
 	return 0;
 }
 
-static int ipa_create_gsi_smmu_mapping(int res_idx, bool wlan_smmu_en,
+int ipa_create_gsi_smmu_mapping(int res_idx, bool wlan_smmu_en,
 		phys_addr_t pa, struct sg_table *sgt, size_t len, bool device,
 		unsigned long *iova)
 {
@@ -2011,7 +2011,7 @@
 	 */
 	if (IPA_CLIENT_IS_PROD(ep->client)) {
 		IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
-				clnt_hdl, ep->client);
+			clnt_hdl, ep->client);
 		/* remove delay on wlan-prod pipe*/
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 4095e9e..df6f32a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1242,7 +1242,7 @@
 			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 16, 3, 8, 8, IPA_EE_UC } },
+			{ 16, 11, 8, 8, IPA_EE_UC } },
 	[IPA_3_5_1][IPA_CLIENT_WLAN2_CONS]          =  {
 			true, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
@@ -1701,6 +1701,12 @@
 			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
 			QMB_MASTER_SELECT_DDR,
 			{ 6, 2, 8, 16, IPA_EE_UC } },
+	[IPA_4_1][IPA_CLIENT_WLAN2_PROD]          = {
+			true, IPA_v4_0_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_REP_SEQ_TYPE_2PKT_PROC_PASS_NO_DEC_UCP_DMAP,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 9, 8, 16, IPA_EE_AP } },
 	[IPA_4_1][IPA_CLIENT_USB_PROD]            = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
@@ -1793,7 +1799,7 @@
 			false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
-			{ 20, 13, 9, 9, IPA_EE_AP } },
+			{ 17, 1, 8, 13, IPA_EE_AP } },
 	[IPA_4_1][IPA_CLIENT_WLAN3_CONS]          = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			false,
@@ -5050,9 +5056,15 @@
 	    param_in->client == IPA_CLIENT_ETHERNET_PROD ||
 		param_in->client == IPA_CLIENT_WIGIG_PROD) {
 		result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
-	} else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
+	} else if (param_in->client == IPA_CLIENT_WLAN1_PROD ||
+			   param_in->client == IPA_CLIENT_WLAN2_PROD) {
 		ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
-		result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
+		if (param_in->client == IPA_CLIENT_WLAN2_PROD)
+			result = ipa3_write_qmapid_wdi3_gsi_pipe(
+				ipa_ep_idx, meta.qmap_id);
+		else
+			result = ipa3_write_qmapid_wdi_pipe(
+				ipa_ep_idx, meta.qmap_id);
 		if (result)
 			IPAERR_RL("qmap_id %d write failed on ep=%d\n",
 					meta.qmap_id, ipa_ep_idx);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
index 5f64d0c..b9b8519 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
@@ -1,41 +1,727 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "ipa_i.h"
 #include <linux/ipa_wdi3.h>
 
+#define IPA_WDI3_TX_DIR 1
+#define IPA_WDI3_RX_DIR 2
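+/*
+ * Direction is from the IPA's point of view: Tx feeds the WLAN CONS
+ * pipe (downlink), Rx serves the WLAN PROD pipe (uplink).
+ */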
+
+#define UPDATE_RP_MODERATION_CONFIG 1
+#define UPDATE_RP_MODERATION_THRESHOLD 8
+
+#define IPA_WDI3_GSI_EVT_RING_INT_MODT 32
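+/*
+ * Event-ring interrupt moderation: GSI coalesces completion interrupts
+ * until the moderation timer (int_modt) expires or the event counter
+ * (int_modc, set to 1 below) is reached.
+ */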
+
+static void ipa3_wdi3_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_EVT_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_EVT_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_EVT_EVT_RING_EMPTY_ERR:
+		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	ipa_assert();
+}
+
+static void ipa3_wdi3_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_CHAN_INVALID_TRE_ERR:
+		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
+		break;
+	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_CHAN_HWO_1_ERR:
+		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	ipa_assert();
+}
+
+static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
+	struct ipa_wdi_pipe_setup_info *info,
+	struct ipa_wdi_pipe_setup_info_smmu *info_smmu, u8 dir,
+	struct ipa3_ep_context *ep)
+{
+	struct gsi_evt_ring_props gsi_evt_ring_props;
+	struct gsi_chan_props gsi_channel_props;
+	union __packed gsi_channel_scratch ch_scratch;
+	union __packed gsi_evt_scratch evt_scratch;
+	const struct ipa_gsi_ep_config *gsi_ep_info;
+	int result, len;
+	unsigned long va;
+
+	if (!info || !info_smmu || !ep) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+	/* setup event ring */
+	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_WDI3_EV;
+	gsi_evt_ring_props.intr = GSI_INTR_IRQ;
+	/* event-ring element size: 16B for Tx, 8B for Rx */
+	if (dir == IPA_WDI3_TX_DIR)
+		gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+	else
+		gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_8B;
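+	/*
+	 * With SMMU enabled the rings arrive as sg_tables and must first be
+	 * mapped into the IPA/GSI IOMMU domain; the returned IOVA is what
+	 * gets programmed into the ring properties.
+	 */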
+	if (!is_smmu_enabled) {
+		gsi_evt_ring_props.ring_len = info->event_ring_size;
+		gsi_evt_ring_props.ring_base_addr =
+			(u64)info->event_ring_base_pa;
+	} else {
+		len = info_smmu->event_ring_size;
+		if (dir == IPA_WDI3_TX_DIR) {
+			if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_RING_RES,
+				true, info->event_ring_base_pa,
+				&info_smmu->event_ring_base, len,
+				false, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				return -EFAULT;
+			}
+		} else {
+			if (ipa_create_gsi_smmu_mapping(
+				IPA_WDI_RX_COMP_RING_RES, true,
+				info->event_ring_base_pa,
+				&info_smmu->event_ring_base, len,
+				false, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				return -EFAULT;
+			}
+		}
+		gsi_evt_ring_props.ring_len = len;
+		gsi_evt_ring_props.ring_base_addr = (u64)va;
+	}
+	gsi_evt_ring_props.int_modt = IPA_WDI3_GSI_EVT_RING_INT_MODT;
+	gsi_evt_ring_props.int_modc = 1;
+	gsi_evt_ring_props.exclusive = true;
+	gsi_evt_ring_props.err_cb = ipa3_wdi3_gsi_evt_ring_err_cb;
+	gsi_evt_ring_props.user_data = NULL;
+
+	result = gsi_alloc_evt_ring(&gsi_evt_ring_props, ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_evt_ring_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("fail to alloc RX event ring\n");
+		/* TODO: release the gsi_smmu_mapping here if smmu enabled */
+		return -EFAULT;
+	}
+
+	ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
+	ep->gsi_mem_info.evt_ring_base_addr =
+		gsi_evt_ring_props.ring_base_addr;
+
+	/* setup channel ring */
+	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+	gsi_channel_props.prot = GSI_CHAN_PROT_WDI3;
+	if (dir == IPA_WDI3_TX_DIR)
+		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
+	else
+		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+
+	gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
+	if (!gsi_ep_info) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+		       ep->client);
+		result = -EINVAL;
+		goto fail_get_gsi_ep_info;
+	}
+
+	gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
+
+	gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_channel_props.low_weight = 1;
+	gsi_channel_props.err_cb = ipa3_wdi3_gsi_chan_err_cb;
+
+	if (!is_smmu_enabled) {
+		gsi_channel_props.ring_len = (u16)info->transfer_ring_size;
+		gsi_channel_props.ring_base_addr =
+			(u64)info->transfer_ring_base_pa;
+	} else {
+		len = info_smmu->transfer_ring_size;
+		if (dir == IPA_WDI3_TX_DIR) {
+			if (ipa_create_gsi_smmu_mapping(IPA_WDI_TX_RING_RES,
+				true, info->transfer_ring_base_pa,
+				&info_smmu->transfer_ring_base, len,
+				false, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_get_gsi_ep_info;
+			}
+		} else {
+			if (ipa_create_gsi_smmu_mapping(
+				IPA_WDI_RX_RING_RES, true,
+				info->transfer_ring_base_pa,
+				&info_smmu->transfer_ring_base, len,
+				false, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_get_gsi_ep_info;
+			}
+		}
+		gsi_channel_props.ring_len = len;
+		gsi_channel_props.ring_base_addr = (u64)va;
+	}
+
+	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		/* TODO: release the gsi_smmu_mapping here if smmu enabled */
+		goto fail_get_gsi_ep_info;
+	}
+
+	ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr =
+		gsi_channel_props.ring_base_addr;
+
+	/* write event scratch */
+	memset(&evt_scratch, 0, sizeof(evt_scratch));
+	evt_scratch.wdi3.update_rp_moderation_config =
+		UPDATE_RP_MODERATION_CONFIG;
+	result = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl, evt_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to write evt ring scratch\n");
+		goto fail_write_scratch;
+	}
+	/* write event ring db address */
+	gsi_wdi3_write_evt_ring_db(ep->gsi_evt_ring_hdl,
+		(u32)info->event_ring_doorbell_pa,
+		(u32)((u64)info->event_ring_doorbell_pa >> 32));
+
+	/* write channel scratch */
+	memset(&ch_scratch, 0, sizeof(ch_scratch));
+	ch_scratch.wdi3.update_rp_moderation_threshold =
+		UPDATE_RP_MODERATION_THRESHOLD;
+	if (dir == IPA_WDI3_RX_DIR) {
+		ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
+		/* this metadata register offset needs to be in words */
+		ch_scratch.wdi3.endp_metadata_reg_offset =
+			ipahal_get_reg_mn_ofst(IPA_ENDP_INIT_HDR_METADATA_n, 0,
+				gsi_ep_info->ipa_ep_num) / 4;
+	}
+
+	if (!is_smmu_enabled) {
+		ch_scratch.wdi3.wifi_rp_address_low =
+			(u32)info->transfer_ring_doorbell_pa;
+		ch_scratch.wdi3.wifi_rp_address_high =
+			(u32)((u64)info->transfer_ring_doorbell_pa >> 32);
+	} else {
+		if (dir == IPA_WDI3_TX_DIR) {
+			if (ipa_create_gsi_smmu_mapping(IPA_WDI_TX_DB_RES,
+				true, info_smmu->transfer_ring_doorbell_pa,
+				NULL, 4, true, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_write_scratch;
+			}
+			ch_scratch.wdi3.wifi_rp_address_low = (u32)va;
+			ch_scratch.wdi3.wifi_rp_address_high =
+				(u32)((u64)va >> 32);
+		} else {
+			if (ipa_create_gsi_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+				true, info_smmu->transfer_ring_doorbell_pa,
+				NULL, 4, true, &va)) {
+				IPAERR("failed to get smmu mapping\n");
+				result = -EFAULT;
+				goto fail_write_scratch;
+			}
+			/*
+			 * TODO: for all access over PCIe for MDM,
+			 * there is no SMMU
+			 */
+			ch_scratch.wdi3.wifi_rp_address_low = (u32)va;
+			ch_scratch.wdi3.wifi_rp_address_high =
+				(u32)((u64)va >> 32);
+		}
+	}
+	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to write evt ring scratch\n");
+		goto fail_write_scratch;
+	}
+	return 0;
+
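+/* unwind in reverse order: free the channel first, then its event ring */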
+fail_write_scratch:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+	ep->gsi_chan_hdl = ~0;
+fail_get_gsi_ep_info:
+	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	ep->gsi_evt_ring_hdl = ~0;
+	return result;
+}
+
 int ipa3_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in,
 	struct ipa_wdi_conn_out_params *out,
 	ipa_wdi_meter_notifier_cb wdi_notify)
 {
-	IPAERR("wdi3 over uc offload not supported");
-	WARN_ON(1);
+	enum ipa_client_type rx_client;
+	enum ipa_client_type tx_client;
+	struct ipa3_ep_context *ep_rx;
+	struct ipa3_ep_context *ep_tx;
+	int ipa_ep_idx_rx;
+	int ipa_ep_idx_tx;
+	int result = 0;
+	u32 gsi_db_addr_low, gsi_db_addr_high;
+	void __iomem *db_addr;
+	u32 evt_ring_db_addr_low, evt_ring_db_addr_high;
 
-	return -EFAULT;
+	/* WDI3 is only supported over GSI */
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		IPAERR("wdi3 over uc offload not supported\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (!in || !out) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!in->is_smmu_enabled) {
+		rx_client = in->u_rx.rx.client;
+		tx_client = in->u_tx.tx.client;
+	} else {
+		rx_client = in->u_rx.rx_smmu.client;
+		tx_client = in->u_tx.tx_smmu.client;
+	}
+
+	ipa_ep_idx_rx = ipa_get_ep_mapping(rx_client);
+	ipa_ep_idx_tx = ipa_get_ep_mapping(tx_client);
+
+	if (ipa_ep_idx_rx == -1 || ipa_ep_idx_tx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		return -EFAULT;
+	}
+	if (ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES ||
+		ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("ep out of range.\n");
+		return -EFAULT;
+	}
+
+	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
+	ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
+
+	if (ep_rx->valid || ep_tx->valid) {
+		IPAERR("EP already allocated.\n");
+		return -EFAULT;
+	}
+
+	memset(ep_rx, 0, offsetof(struct ipa3_ep_context, sys));
+	memset(ep_tx, 0, offsetof(struct ipa3_ep_context, sys));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+	if (wdi_notify)
+		ipa3_ctx->uc_wdi_ctx.stats_notify = wdi_notify;
+	else
+		IPADBG("wdi_notify is null\n");
+#endif
+
+	/* setup rx ep cfg */
+	ep_rx->valid = 1;
+	ep_rx->client = rx_client;
+	result = ipa3_disable_data_path(ipa_ep_idx_rx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_rx);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return -EFAULT;
+	}
+	ep_rx->client_notify = in->notify;
+	ep_rx->priv = in->priv;
+
+	if (!in->is_smmu_enabled)
+		memcpy(&ep_rx->cfg, &in->u_rx.rx.ipa_ep_cfg,
+			sizeof(ep_rx->cfg));
+	else
+		memcpy(&ep_rx->cfg, &in->u_rx.rx_smmu.ipa_ep_cfg,
+			sizeof(ep_rx->cfg));
+
+	if (ipa3_cfg_ep(ipa_ep_idx_rx, &ep_rx->cfg)) {
+		IPAERR("fail to setup rx pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	IPADBG("ipa3_ctx->ipa_wdi3_over_gsi %d\n",
+		   ipa3_ctx->ipa_wdi3_over_gsi);
+	/* setup RX gsi channel */
+	if (ipa3_setup_wdi3_gsi_channel(in->is_smmu_enabled,
+		&in->u_rx.rx, &in->u_rx.rx_smmu, IPA_WDI3_RX_DIR,
+		ep_rx)) {
+		IPAERR("fail to setup wdi3 gsi rx channel\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	if (gsi_query_channel_db_addr(ep_rx->gsi_chan_hdl,
+		&gsi_db_addr_low, &gsi_db_addr_high)) {
+		IPAERR("failed to query gsi rx db addr\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	/* only 32 bit lsb is used */
+	out->rx_uc_db_pa = (phys_addr_t)(gsi_db_addr_low);
+	IPADBG("out->rx_uc_db_pa %llu\n", out->rx_uc_db_pa);
+
+	ipa3_install_dflt_flt_rules(ipa_ep_idx_rx);
+	IPADBG("client %d (ep: %d) connected\n", rx_client,
+		ipa_ep_idx_rx);
+
+	/* setup tx ep cfg */
+	ep_tx->valid = 1;
+	ep_tx->client = tx_client;
+	result = ipa3_disable_data_path(ipa_ep_idx_tx);
+	if (result) {
+		IPAERR("disable data path failed res=%d ep=%d.\n", result,
+			ipa_ep_idx_tx);
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (!in->is_smmu_enabled)
+		memcpy(&ep_tx->cfg, &in->u_tx.tx.ipa_ep_cfg,
+			sizeof(ep_tx->cfg));
+	else
+		memcpy(&ep_tx->cfg, &in->u_tx.tx_smmu.ipa_ep_cfg,
+			sizeof(ep_tx->cfg));
+
+	if (ipa3_cfg_ep(ipa_ep_idx_tx, &ep_tx->cfg)) {
+		IPAERR("fail to setup tx pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	/* setup TX gsi channel */
+	if (ipa3_setup_wdi3_gsi_channel(in->is_smmu_enabled,
+		&in->u_tx.tx, &in->u_tx.tx_smmu, IPA_WDI3_TX_DIR,
+		ep_tx)) {
+		IPAERR("fail to setup wdi3 gsi tx channel\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	if (gsi_query_channel_db_addr(ep_tx->gsi_chan_hdl,
+		&gsi_db_addr_low, &gsi_db_addr_high)) {
+		IPAERR("failed to query gsi tx db addr\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	/* only 32 bit lsb is used */
+	out->tx_uc_db_pa = (phys_addr_t)(gsi_db_addr_low);
+	IPADBG("out->tx_uc_db_pa %llu\n", out->tx_uc_db_pa);
+	IPADBG("client %d (ep: %d) connected\n", tx_client,
+		ipa_ep_idx_tx);
+
+	/* ring initial event ring dbs */
+	gsi_query_evt_ring_db_addr(ep_rx->gsi_evt_ring_hdl,
+		&evt_ring_db_addr_low, &evt_ring_db_addr_high);
+	IPADBG("evt_ring_hdl %lu, db_addr_low %u db_addr_high %u\n",
+		ep_rx->gsi_evt_ring_hdl, evt_ring_db_addr_low,
+		evt_ring_db_addr_high);
+
+	/* only 32 bit lsb is used */
+	db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
+	/*
+	 * IPA/GSI driver should ring the event DB once after
+	 * initialization of the event, with a value that is
+	 * outside of the ring range. Eg: ring base = 0x1000,
+	 * ring size = 0x100 => AP can write value > 0x1100
+	 * into the doorbell address, e.g. 0x1110.
+	 */
+	iowrite32(in->u_rx.rx.event_ring_size / 4 + 10, db_addr);
+	iounmap(db_addr);
+	gsi_query_evt_ring_db_addr(ep_tx->gsi_evt_ring_hdl,
+		&evt_ring_db_addr_low, &evt_ring_db_addr_high);
+
+	/* only 32 bit lsb is used */
+	db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
+	/*
+	 * IPA/GSI driver should ring the event DB once after
+	 * initialization of the event, with a value that is
+	 * outside of the ring range. Eg: ring base = 0x1000,
+	 * ring size = 0x100 => AP can write value > 0x1100
+	 * into the doorbell address, e.g. 0x1110.
+	 */
+	iowrite32(in->u_tx.tx.event_ring_size / 4 + 10, db_addr);
+	iounmap(db_addr);
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
 }
 
 int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 {
-	IPAERR("wdi3 over uc offload not supported");
-	WARN_ON(1);
+	struct ipa3_ep_context *ep_tx, *ep_rx;
+	int result = 0;
 
-	return -EFAULT;
+	/* WDI3 is only supported over GSI */
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		IPAERR("wdi3 over uc offload not supported\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
+	IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);
+
+	if (ipa_ep_idx_tx < 0 || ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES ||
+		ipa_ep_idx_rx < 0 || ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("invalid ipa ep index\n");
+		return -EINVAL;
+	}
+
+	ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
+	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
+
+	/* tear down tx pipe */
+	result = gsi_reset_evt_ring(ep_tx->gsi_evt_ring_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to reset evt ring: %d.\n", result);
+		return result;
+	}
+	result = ipa3_release_gsi_channel(ipa_ep_idx_tx);
+	if (result) {
+		IPAERR("failed to release gsi channel: %d\n", result);
+		return result;
+	}
+
+	memset(ep_tx, 0, sizeof(struct ipa3_ep_context));
+	IPADBG("tx client (ep: %d) disconnected\n", ipa_ep_idx_tx);
+
+	/* tear down rx pipe */
+	result = gsi_reset_evt_ring(ep_rx->gsi_evt_ring_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to reset evt ring: %d.\n", result);
+		return result;
+	}
+	result = ipa3_release_gsi_channel(ipa_ep_idx_rx);
+	if (result) {
+		IPAERR("failed to release gsi channel: %d\n", result);
+		return result;
+	}
+
+	ipa3_delete_dflt_flt_rules(ipa_ep_idx_rx);
+	memset(ep_rx, 0, sizeof(struct ipa3_ep_context));
+	IPADBG("rx client (ep: %d) disconnected\n", ipa_ep_idx_rx);
+
+	return result;
 }
 
 int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 {
-	IPAERR("wdi3 over uc offload not supported");
-	WARN_ON(1);
+	struct ipa3_ep_context *ep_tx, *ep_rx;
+	int result = 0;
 
-	return -EFAULT;
+	/* WDI3 is only supported over GSI */
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		IPAERR("wdi3 over uc offload not supported\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
+	IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);
+
+	ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
+	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
+
+	/* start gsi tx channel */
+	result = gsi_start_channel(ep_tx->gsi_chan_hdl);
+	if (result) {
+		IPAERR("failed to start gsi tx channel\n");
+		return -EFAULT;
+	}
+
+	/* start gsi rx channel */
+	result = gsi_start_channel(ep_rx->gsi_chan_hdl);
+	if (result) {
+		IPAERR("failed to start gsi rx channel\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* enable data path */
+	result = ipa3_enable_data_path(ipa_ep_idx_rx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_rx);
+		result = -EFAULT;
+		goto fail;
+	}
+
+	result = ipa3_enable_data_path(ipa_ep_idx_tx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_tx);
+		result = -EFAULT;
+		goto fail;
+	}
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
 }
 
 int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 {
-	IPAERR("wdi3 over uc offload not supported");
-	WARN_ON(1);
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	u32 source_pipe_bitmask = 0;
+	bool disable_force_clear = false;
+	struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
 
-	return -EFAULT;
+	/* WDI3 is only supported over GSI */
+	if (!ipa3_ctx->ipa_wdi3_over_gsi) {
+		IPAERR("wdi3 over uc offload not supported\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* disable tx data path */
+	result = ipa3_disable_data_path(ipa_ep_idx_tx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_tx);
+		result = -EFAULT;
+		goto fail;
+	}
+
+	/* disable rx data path */
+	result = ipa3_disable_data_path(ipa_ep_idx_rx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_rx);
+		result = -EFAULT;
+		goto fail;
+	}
+	/*
+	 * For WDI 3.0 need to ensure pipe will be empty before suspend
+	 * as IPA uC will fail to suspend the pipe otherwise.
+	 */
+	ep = &ipa3_ctx->ep[ipa_ep_idx_rx];
+	source_pipe_bitmask = 1 <<
+			ipa3_get_ep_mapping(ep->client);
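+	/* e.g. WLAN2_PROD mapped to ep 7 yields source_pipe_bitmask 0x80 */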
+	result = ipa3_enable_force_clear(ipa_ep_idx_rx,
+			false, source_pipe_bitmask);
+	if (result) {
+		/*
+		 * assuming here modem SSR, AP can remove
+		 * the delay in this case
+		 */
+		IPAERR("failed to force clear %d\n", result);
+		IPAERR("remove delay from SCND reg\n");
+		ep_ctrl_scnd.endp_delay = false;
+		ipahal_write_reg_n_fields(
+			IPA_ENDP_INIT_CTRL_SCND_n, ipa_ep_idx_rx,
+			&ep_ctrl_scnd);
+	} else {
+		disable_force_clear = true;
+	}
+
+	/* stop gsi rx channel */
+	result = ipa3_stop_gsi_channel(ipa_ep_idx_rx);
+	if (result) {
+		IPAERR("failed to stop gsi rx channel\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	/* stop gsi tx channel */
+	result = ipa3_stop_gsi_channel(ipa_ep_idx_tx);
+	if (result) {
+		IPAERR("failed to stop gsi tx channel\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	/* reset gsi rx channel */
+	result = ipa3_reset_gsi_channel(ipa_ep_idx_rx);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to reset gsi channel: %d.\n", result);
+		result = -EFAULT;
+		goto fail;
+	}
+	/* reset gsi tx channel */
+	result = ipa3_reset_gsi_channel(ipa_ep_idx_tx);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to reset gsi channel: %d.\n", result);
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (disable_force_clear)
+		ipa3_disable_force_clear(ipa_ep_idx_rx);
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
+int ipa3_write_qmapid_wdi3_gsi_pipe(u32 clnt_hdl, u8 qmap_id)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union __packed gsi_channel_scratch ch_scratch;
+
+	memset(&ch_scratch, 0, sizeof(ch_scratch));
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR_RL("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
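+	/* hold an active-clients vote while the channel is stopped and updated */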
+	result = gsi_read_channel_scratch(ep->gsi_chan_hdl, &ch_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to read channel scratch %d\n", result);
+		goto exit;
+	}
+	result = gsi_stop_channel(ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS && result != -GSI_STATUS_AGAIN &&
+		result != -GSI_STATUS_TIMED_OUT) {
+		IPAERR("failed to stop gsi channel %d\n", result);
+		goto exit;
+	}
+
+	ch_scratch.wdi3.qmap_id = qmap_id;
+	result = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+			ch_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to write channel scratch %d\n", result);
+		goto exit;
+	}
+
+	result = gsi_start_channel(ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS)
+		IPAERR("failed to start gsi channel %d\n", result);
+
+exit:
+	/* balance the IPA_ACTIVE_CLIENTS_INC_EP vote taken above */
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
 }
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
index 7fdbf39..be6487d 100644
--- a/drivers/platform/msm/ipa/test/Makefile
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
 obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
-ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o ipa_pm_ut.o
+ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o ipa_pm_ut.o ipa_test_wdi3.o
diff --git a/drivers/platform/msm/ipa/test/ipa_test_wdi3.c b/drivers/platform/msm/ipa/test/ipa_test_wdi3.c
new file mode 100644
index 0000000..a6f69cc
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_test_wdi3.c
@@ -0,0 +1,1163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "ipa_ut_framework.h"
+#include <linux/ipa_wdi3.h>
+#include <linux/ipa.h>
+#include <linux/delay.h>
+#include "../ipa_v3/ipa_i.h"
+
+#define NUM_TX_BUFS 10
+#define NUM_RX_BUFS 10
+#define NUM_REDUNDANT_TX_ELE 1
+#define NUM_RX_TR_ELE NUM_RX_BUFS
+#define NUM_RX_ER_ELE NUM_RX_BUFS
+#define NUM_TX_TR_ELE (NUM_TX_BUFS + NUM_REDUNDANT_TX_ELE)
+#define NUM_TX_ER_ELE (NUM_TX_BUFS + NUM_REDUNDANT_TX_ELE)
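+/*
+ * The one redundant Tx element keeps the ring from ever being
+ * completely full, so a full ring can be told apart from an empty
+ * one (the write pointer never catches up with the read pointer).
+ */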
+
+#define RX_METADATA_SIZE 4
+#define PACKET_HEADER_SIZE 220
+#define ETH_PACKET_SIZE 4
+#define PACKET_CONTENT 0x12345678
+
+#define PKT_SIZE 4096
+
+#define DB_REGISTER_SIZE 4
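+/* each doorbell register is a single 32-bit word */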
+
+#define NUM_MULTI_PKT 8
+
+static int multi_pkt_array[] = {0x12345678, 0x87654321,
+	0x00112233, 0x01234567, 0x45454545, 0x80808080,
+	0x13245678, 0x12345767, 0x43213456};
+
+static u32 rx_uc_db_local;
+static u32 tx_uc_db_local;
+static u8 tx_bf_idx;
+static u8 rx_bf_idx;
+
+struct ipa_test_wdi3_context {
+	struct ipa_mem_buffer tx_transfer_ring_addr;
+	struct ipa_mem_buffer tx_event_ring_addr;
+	struct ipa_mem_buffer rx_transfer_ring_addr;
+	struct ipa_mem_buffer rx_event_ring_addr;
+	struct ipa_mem_buffer tx_bufs[NUM_TX_BUFS];
+	struct ipa_mem_buffer rx_bufs[NUM_RX_BUFS];
+	struct ipa_mem_buffer tx_transfer_ring_db;
+	struct ipa_mem_buffer tx_event_ring_db;
+	struct ipa_mem_buffer rx_transfer_ring_db;
+	struct ipa_mem_buffer rx_event_ring_db;
+	dma_addr_t tx_uc_db_pa;
+	dma_addr_t rx_uc_db_pa;
+};
+
+static struct ipa_test_wdi3_context *test_wdi3_ctx;
+
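+/* 40-bit DMA address split into a low 32-bit word plus 8 high bits */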
+struct buffer_addr_info {
+	u32 buffer_addr_low;
+	u32 buffer_addr_high : 8;
+	u32 return_buffer_manager : 3;
+	u32 sw_buffer_cookie : 21;
+} __packed;
+
+struct tx_transfer_ring_ele {
+	struct buffer_addr_info buf_or_link_desc_addr_info;
+	u32 resv[6];
+} __packed;
+
+struct tx_event_ring_ele {
+	u32 reserved_5;
+	struct buffer_addr_info buf_or_link_desc_addr_info;
+	u32 buf_or_ext_desc_type : 1;
+	u32 epd : 1;
+	u32 encap_type : 2;
+	u32 encrypt_type : 4;
+	u32 src_buffer_swap : 1;
+	u32 link_meta_swap : 1;
+	u32 hlos_tid : 4;
+	u32 addrX_en : 1;
+	u32 addrY_en : 1;
+	u32 tcl_cmd_number : 16;
+	u32 data_length : 16;
+	u32 ipv4_checksum_en : 1;
+	u32 udp_over_ipv4_checksum_en : 1;
+	u32 udp_over_ipv6_checksum_en : 1;
+	u32 tcp_over_ipv4_checksum_en : 1;
+	u32 tcp_over_ipv6_checksum_en : 1;
+	u32 to_fw : 1;
+	u32 dscp_to_tid_priority_table_id : 1;
+	u32 packet_offset : 9;
+	u32 buffer_timestamp : 19;
+	u32 buffer_timestamp_valid : 1;
+	u32 reserved_4 : 12;
+	u32 reserved_6;
+	u32 reserved_7a : 20;
+	u32 ring_id : 8;
+	u32 looping_count : 4;
+} __packed;
+
+struct rx_mpdu_desc_info {
+	u32 msdu_count : 8;
+	u32 mpdu_sequence_number : 12;
+	u32 fragment_flag : 1;
+	u32 mpdu_retry_bit : 1;
+	u32 ampdu_flag : 1;
+	u32 bar_frame : 1;
+	u32 pn_fields_contain_valid_info : 1;
+	u32 sa_is_valid : 1;
+	u32 sa_idx_timeout : 1;
+	u32 da_is_valid : 1;
+	u32 da_is_mcbc : 1;
+	u32 da_idx_timeout : 1;
+	u32 raw_mpdu : 1;
+	u32 reserved : 1;
+	u32 peer_meta_data;
+} __packed;
+
+struct rx_msdu_desc_info {
+	u32 first_msdu_in_mpdu_flag : 1;
+	u32 last_msdu_in_mpdu_flag : 1;
+	u32 msdu_continuation : 1;
+	u32 msdu_length : 14;
+	u32 reo_destination_indication : 5;
+	u32 msdu_drop : 1;
+	u32 sa_is_valid : 1;
+	u32 sa_idx_timeout : 1;
+	u32 da_is_valid : 1;
+	u32 da_is_mcbc : 1;
+	u32 da_idx_timeout : 1;
+	u32 reserved_0a : 4;
+	u32 reserved_1a;
+} __packed;
+
+struct rx_transfer_ring_ele {
+	struct buffer_addr_info buf_or_link_desc_addr_info;
+	struct rx_mpdu_desc_info rx_mpdu_desc_info_details;
+	struct rx_msdu_desc_info rx_msdu_desc_info_details;
+	u32 rx_reo_queue_desc_addr_31_0;
+	u32 rx_reo_queue_desc_addr_39_32 : 8;
+	u32 reo_dest_buffer_type : 1;
+	u32 reo_push_reason : 2;
+	u32 reo_error_code : 5;
+	u32 receive_queue_number : 16;
+	u32 soft_reorder_info_valid : 1;
+	u32 reorder_opcode : 4;
+	u32 reorder_slot_index : 8;
+	u32 reserved_8a : 19;
+	u32 reserved_9a;
+	u32 reserved_10a;
+	u32 reserved_11a;
+	u32 reserved_12a;
+	u32 reserved_13a;
+	u32 reserved_14a;
+	u32 reserved_15a : 20;
+	u32 ring_id : 8;
+	u32 looping_count : 4;
+} __packed;
+
+struct rx_event_ring_ele {
+	struct buffer_addr_info buf_or_link_desc_addr_info;
+} __packed;
+
+static void ipa_test_wdi3_free_dma_buff(struct ipa_mem_buffer *mem)
+{
+	if (!mem) {
+		IPA_UT_ERR("empty pointer\n");
+		return;
+	}
+
+	dma_free_coherent(ipa3_ctx->pdev, mem->size, mem->base,
+		mem->phys_base);
+}
+
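+/*
+ * Doorbell values count 32-bit words: advance by steps elements of
+ * num_words words each, wrapping at the ring size (ring_size / 4 words).
+ */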
+static void ipa_test_wdi3_advance_uc_db(u32 *db, int steps,
+	int num_words, int ring_size)
+{
+	*db = (*db + steps * num_words) % (ring_size / 4);
+	IPA_UT_DBG("new db value: %u\n", *db);
+}
+
+static int ipa_test_wdi3_alloc_mmio(void)
+{
+	int ret = 0, i, j;
+	int num_tx_alloc_bufs, num_rx_alloc_bufs;
+	u32 size;
+
+	if (!test_wdi3_ctx) {
+		IPA_UT_ERR("test_wdi3_ctx is not initialized.\n");
+		return -EFAULT;
+	}
+
+	/* allocate tx transfer ring memory */
+	size = NUM_TX_TR_ELE * sizeof(struct tx_transfer_ring_ele);
+	test_wdi3_ctx->tx_transfer_ring_addr.size = size;
+	test_wdi3_ctx->tx_transfer_ring_addr.base =
+		dma_alloc_coherent(ipa3_ctx->pdev, size,
+			&test_wdi3_ctx->tx_transfer_ring_addr.phys_base,
+			GFP_KERNEL);
+	if (!test_wdi3_ctx->tx_transfer_ring_addr.base) {
+		IPA_UT_ERR("fail to alloc memory.\n");
+		return -ENOMEM;
+	}
+
+	/* allocate tx event ring memory */
+	size = NUM_TX_ER_ELE * sizeof(struct tx_event_ring_ele);
+	test_wdi3_ctx->tx_event_ring_addr.size = size;
+	test_wdi3_ctx->tx_event_ring_addr.base =
+		dma_alloc_coherent(ipa3_ctx->pdev, size,
+			&test_wdi3_ctx->tx_event_ring_addr.phys_base,
+			GFP_KERNEL);
+	if (!test_wdi3_ctx->tx_event_ring_addr.base) {
+		IPA_UT_ERR("fail to alloc memory.\n");
+		ret = -ENOMEM;
+		goto fail_tx_event_ring;
+	}
+
+	/* allocate rx transfer ring memory */
+	size = NUM_RX_TR_ELE * sizeof(struct rx_transfer_ring_ele);
+	test_wdi3_ctx->rx_transfer_ring_addr.size = size;
+	test_wdi3_ctx->rx_transfer_ring_addr.base =
+		dma_alloc_coherent(ipa3_ctx->pdev, size,
+			&test_wdi3_ctx->rx_transfer_ring_addr.phys_base,
+			GFP_KERNEL);
+	if (!test_wdi3_ctx->rx_transfer_ring_addr.base) {
+		IPA_UT_ERR("fail to alloc memory.\n");
+		ret = -ENOMEM;
+		goto fail_rx_transfer_ring;
+	}
+
+	/* allocate rx event ring memory */
+	size = NUM_RX_ER_ELE * sizeof(struct rx_event_ring_ele);
+	test_wdi3_ctx->rx_event_ring_addr.size = size;
+	test_wdi3_ctx->rx_event_ring_addr.base =
+		dma_alloc_coherent(ipa3_ctx->pdev, size,
+			&test_wdi3_ctx->rx_event_ring_addr.phys_base,
+			GFP_KERNEL);
+	if (!test_wdi3_ctx->rx_event_ring_addr.base) {
+		IPA_UT_ERR("fail to alloc memory.\n");
+		ret = -ENOMEM;
+		goto fail_rx_event_ring;
+	}
+
+	/* allocate tx buffers */
+	num_tx_alloc_bufs = NUM_TX_BUFS;
+	for (i = 0; i < NUM_TX_BUFS; i++) {
+		size = ETH_PACKET_SIZE; /* 4-byte test payload */
+		test_wdi3_ctx->tx_bufs[i].size = size;
+		test_wdi3_ctx->tx_bufs[i].base =
+			dma_alloc_coherent(ipa3_ctx->pdev, size,
+				&test_wdi3_ctx->tx_bufs[i].phys_base,
+				GFP_KERNEL);
+		if (!test_wdi3_ctx->tx_bufs[i].base) {
+			IPA_UT_ERR("fail to alloc memory.\n");
+			num_tx_alloc_bufs = i;
+			ret = -ENOMEM;
+			goto fail_tx_bufs;
+		}
+	}
+
+	/* allocate rx buffers */
+	num_rx_alloc_bufs = NUM_RX_BUFS;
+	for (i = 0; i < NUM_RX_BUFS; i++) {
+		size = ETH_PACKET_SIZE + PACKET_HEADER_SIZE; /* header + payload */
+		test_wdi3_ctx->rx_bufs[i].size = size;
+		test_wdi3_ctx->rx_bufs[i].base =
+			dma_alloc_coherent(ipa3_ctx->pdev, size,
+				&test_wdi3_ctx->rx_bufs[i].phys_base,
+				GFP_KERNEL);
+		if (!test_wdi3_ctx->rx_bufs[i].base) {
+			IPA_UT_ERR("fail to alloc memory.\n");
+			num_rx_alloc_bufs = i;
+			ret = -ENOMEM;
+			goto fail_rx_bufs;
+		}
+	}
+
+	/* allocate tx transfer ring db */
+	test_wdi3_ctx->tx_transfer_ring_db.size = DB_REGISTER_SIZE;
+	test_wdi3_ctx->tx_transfer_ring_db.base =
+		dma_alloc_coherent(ipa3_ctx->pdev, DB_REGISTER_SIZE,
+		&test_wdi3_ctx->tx_transfer_ring_db.phys_base, GFP_KERNEL);
+	if (!test_wdi3_ctx->tx_transfer_ring_db.base) {
+		IPA_UT_ERR("fail to alloc memory\n");
+		ret = -ENOMEM;
+		goto fail_tx_transfer_ring_db;
+	}
+
+	/* allocate tx event ring db */
+	test_wdi3_ctx->tx_event_ring_db.size = DB_REGISTER_SIZE;
+	test_wdi3_ctx->tx_event_ring_db.base =
+		dma_alloc_coherent(ipa3_ctx->pdev, DB_REGISTER_SIZE,
+		&test_wdi3_ctx->tx_event_ring_db.phys_base, GFP_KERNEL);
+	if (!test_wdi3_ctx->tx_event_ring_db.base) {
+		IPA_UT_ERR("fail to alloc memory\n");
+		ret = -ENOMEM;
+		goto fail_tx_event_ring_db;
+	}
+
+	/* allocate rx transfer ring db */
+	test_wdi3_ctx->rx_transfer_ring_db.size = DB_REGISTER_SIZE;
+	test_wdi3_ctx->rx_transfer_ring_db.base =
+		dma_alloc_coherent(ipa3_ctx->pdev, DB_REGISTER_SIZE,
+		&test_wdi3_ctx->rx_transfer_ring_db.phys_base, GFP_KERNEL);
+	if (!test_wdi3_ctx->rx_transfer_ring_db.base) {
+		IPA_UT_ERR("fail to alloc memory\n");
+		ret = -ENOMEM;
+		goto fail_rx_transfer_ring_db;
+	}
+
+	/* allocate rx event ring db */
+	test_wdi3_ctx->rx_event_ring_db.size = DB_REGISTER_SIZE;
+	test_wdi3_ctx->rx_event_ring_db.base =
+		dma_alloc_coherent(ipa3_ctx->pdev, DB_REGISTER_SIZE,
+		&test_wdi3_ctx->rx_event_ring_db.phys_base, GFP_KERNEL);
+	if (!test_wdi3_ctx->rx_event_ring_db.base) {
+		IPA_UT_ERR("fail to alloc memory\n");
+		ret = -ENOMEM;
+		goto fail_rx_event_ring_db;
+	}
+
+	return ret;
+
+fail_rx_event_ring_db:
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->rx_transfer_ring_db);
+
+fail_rx_transfer_ring_db:
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->tx_event_ring_db);
+
+fail_tx_event_ring_db:
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->tx_transfer_ring_db);
+
+fail_tx_transfer_ring_db:
+fail_rx_bufs:
+	for (j = 0; j < num_rx_alloc_bufs; j++)
+		ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->rx_bufs[j]);
+
+fail_tx_bufs:
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->rx_event_ring_addr);
+
+	for (j = 0; j < num_tx_alloc_bufs; j++)
+		ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->tx_bufs[j]);
+
+fail_rx_event_ring:
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->rx_transfer_ring_addr);
+
+fail_rx_transfer_ring:
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->tx_event_ring_addr);
+
+fail_tx_event_ring:
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->tx_transfer_ring_addr);
+	return ret;
+}
+
+static int ipa_test_wdi3_free_mmio(void)
+{
+	int i;
+
+	if (!test_wdi3_ctx) {
+		IPA_UT_ERR("test_wdi3_ctx is not initialized.\n");
+		return -EFAULT;
+	}
+
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->rx_event_ring_db);
+
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->rx_transfer_ring_db);
+
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->tx_event_ring_db);
+
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->tx_transfer_ring_db);
+
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->rx_event_ring_addr);
+
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->rx_transfer_ring_addr);
+
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->tx_event_ring_addr);
+
+	ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->tx_transfer_ring_addr);
+
+	for (i = 0; i < NUM_RX_BUFS; i++)
+		ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->rx_bufs[i]);
+
+	for (i = 0; i < NUM_TX_BUFS; i++)
+		ipa_test_wdi3_free_dma_buff(&test_wdi3_ctx->tx_bufs[i]);
+
+	return 0;
+}
+
+static int ipa_test_wdi3_suite_setup(void **priv)
+{
+	int ret = 0;
+	struct ipa_wdi_init_in_params in;
+	struct ipa_wdi_init_out_params out;
+
+	IPA_UT_DBG("Start WDI3 Setup\n");
+
+	/* init ipa wdi ctx */
+	in.wdi_notify = NULL;
+	in.notify = NULL;
+	in.priv = NULL;
+	in.wdi_version = IPA_WDI_3;
+	ret = ipa_wdi_init(&in, &out);
+	if (ret) {
+		IPA_UT_ERR("fail to init ipa wdi context\n");
+		return ret;
+	}
+
+	if (!ipa3_ctx) {
+		IPA_UT_ERR("No IPA ctx\n");
+		return -EINVAL;
+	}
+
+	test_wdi3_ctx = kzalloc(sizeof(struct ipa_test_wdi3_context),
+		GFP_KERNEL);
+	if (!test_wdi3_ctx) {
+		IPA_UT_ERR("failed to allocate ctx\n");
+		return -ENOMEM;
+	}
+
+	ret = ipa_test_wdi3_alloc_mmio();
+	if (ret) {
+		IPA_UT_ERR("failed to alloc mmio\n");
+		goto fail_alloc_mmio;
+	}
+
+	*priv = test_wdi3_ctx;
+	return 0;
+
+fail_alloc_mmio:
+	kfree(test_wdi3_ctx);
+	test_wdi3_ctx = NULL;
+	return ret;
+}
+
+static int ipa_test_wdi3_suite_teardown(void *priv)
+{
+	if (!test_wdi3_ctx)
+		return 0;
+
+	ipa_test_wdi3_free_mmio();
+	kfree(test_wdi3_ctx);
+	test_wdi3_ctx = NULL;
+
+	return 0;
+}
+
+static int ipa_wdi3_setup_pipes(void)
+{
+	struct ipa_wdi_conn_in_params *in_param;
+	struct ipa_wdi_conn_out_params *out_param;
+	struct tx_transfer_ring_ele *tx_transfer, *tx_transfer_base;
+	struct rx_transfer_ring_ele *rx_transfer;
+	void __iomem *tx_uc_db;
+	int i, index;
+
+	if (!test_wdi3_ctx) {
+		IPA_UT_ERR("context is empty.\n");
+		return -EFAULT;
+	}
+
+	in_param = kzalloc(sizeof(struct ipa_wdi_conn_in_params),
+		GFP_KERNEL);
+	if (!in_param) {
+		IPA_UT_ERR("failed to allocate in_param\n");
+		return -ENOMEM;
+	}
+
+	out_param = kzalloc(sizeof(struct ipa_wdi_conn_out_params),
+		GFP_KERNEL);
+	if (!out_param) {
+		IPA_UT_ERR("failed to allocate out_param\n");
+		kfree(in_param);
+		return -ENOMEM;
+	}
+
+	memset(in_param, 0, sizeof(struct ipa_wdi_conn_in_params));
+	memset(out_param, 0, sizeof(struct ipa_wdi_conn_out_params));
+
+	/* setup tx parameters */
+	in_param->is_smmu_enabled = false;
+	in_param->u_tx.tx.client = IPA_CLIENT_WLAN2_CONS;
+	in_param->u_tx.tx.transfer_ring_base_pa =
+		test_wdi3_ctx->tx_transfer_ring_addr.phys_base;
+	in_param->u_tx.tx.transfer_ring_size =
+		test_wdi3_ctx->tx_transfer_ring_addr.size;
+	in_param->u_tx.tx.transfer_ring_doorbell_pa =
+		test_wdi3_ctx->tx_transfer_ring_db.phys_base;
+
+	in_param->notify = NULL;
+	in_param->u_tx.tx.event_ring_base_pa =
+		test_wdi3_ctx->tx_event_ring_addr.phys_base;
+	in_param->u_tx.tx.event_ring_size =
+		test_wdi3_ctx->tx_event_ring_addr.size;
+	in_param->u_tx.tx.event_ring_doorbell_pa =
+		test_wdi3_ctx->tx_event_ring_db.phys_base;
+	IPA_UT_DBG("tx_event_ring_db.phys_base %llu\n",
+		test_wdi3_ctx->tx_event_ring_db.phys_base);
+	IPA_UT_DBG("tx_event_ring_db.base %pK\n",
+		test_wdi3_ctx->tx_event_ring_db.base);
+	IPA_UT_DBG("tx_event_ring.phys_base %llu\n",
+		test_wdi3_ctx->tx_event_ring_addr.phys_base);
+	IPA_UT_DBG("tx_event_ring.base %pK\n",
+		test_wdi3_ctx->tx_event_ring_addr.base);
+
+	in_param->u_tx.tx.num_pkt_buffers = NUM_TX_BUFS;
+
+	/* setup rx parameters */
+	in_param->u_rx.rx.client = IPA_CLIENT_WLAN2_PROD;
+	in_param->u_rx.rx.transfer_ring_base_pa =
+		test_wdi3_ctx->rx_transfer_ring_addr.phys_base;
+	in_param->u_rx.rx.transfer_ring_size =
+		test_wdi3_ctx->rx_transfer_ring_addr.size;
+	in_param->u_rx.rx.transfer_ring_doorbell_pa =
+		test_wdi3_ctx->rx_transfer_ring_db.phys_base;
+	in_param->u_rx.rx.pkt_offset = PACKET_HEADER_SIZE;
+
+	in_param->u_rx.rx.event_ring_base_pa =
+		test_wdi3_ctx->rx_event_ring_addr.phys_base;
+	in_param->u_rx.rx.event_ring_size =
+		test_wdi3_ctx->rx_event_ring_addr.size;
+	in_param->u_rx.rx.event_ring_doorbell_pa =
+		test_wdi3_ctx->rx_event_ring_db.phys_base;
+
+	IPA_UT_DBG("rx_event_ring_db.phys_base %llu\n",
+		in_param->u_rx.rx.event_ring_doorbell_pa);
+	IPA_UT_DBG("rx_event_ring_db.base %pK\n",
+		test_wdi3_ctx->rx_event_ring_addr.base);
+
+	in_param->u_rx.rx.num_pkt_buffers = NUM_RX_BUFS;
+	if (ipa_wdi_conn_pipes(in_param, out_param)) {
+		IPA_UT_ERR("fail to conn wdi3 pipes.\n");
+		kfree(in_param);
+		kfree(out_param);
+		return -EFAULT;
+	}
+	if (ipa_wdi_enable_pipes()) {
+		IPA_UT_ERR("fail to enable wdi3 pipes.\n");
+		ipa_wdi_disconn_pipes();
+		kfree(in_param);
+		kfree(out_param);
+		return -EFAULT;
+	}
+	test_wdi3_ctx->tx_uc_db_pa = out_param->tx_uc_db_pa;
+	test_wdi3_ctx->rx_uc_db_pa = out_param->rx_uc_db_pa;
+	IPA_UT_DBG("tx_uc_db_pa %llu, rx_uc_db_pa %llu.\n",
+		test_wdi3_ctx->tx_uc_db_pa, test_wdi3_ctx->rx_uc_db_pa);
+
+	tx_uc_db = ioremap(test_wdi3_ctx->tx_uc_db_pa, DB_REGISTER_SIZE);
+
+	/* setup db registers */
+	*(u32 *)test_wdi3_ctx->rx_transfer_ring_db.base = rx_uc_db_local;
+	*(u32 *)test_wdi3_ctx->rx_event_ring_db.base = 0;
+
+	*(u32 *)test_wdi3_ctx->tx_transfer_ring_db.base = tx_uc_db_local;
+	*(u32 *)test_wdi3_ctx->tx_event_ring_db.base = 0;
+
+	rx_transfer = (struct rx_transfer_ring_ele *)
+		test_wdi3_ctx->rx_transfer_ring_addr.base;
+	for (i = 0; i < NUM_RX_BUFS; i++) {
+		rx_transfer->buf_or_link_desc_addr_info.buffer_addr_low =
+			(u64)test_wdi3_ctx->rx_bufs[i].phys_base & 0xFFFFFFFF;
+		rx_transfer->buf_or_link_desc_addr_info.buffer_addr_high =
+			((u64)test_wdi3_ctx->rx_bufs[i].phys_base >> 32)
+			& 0xFFFFFFFF;
+		rx_transfer++;
+	}
+
+	tx_transfer_base = (struct tx_transfer_ring_ele *)
+		test_wdi3_ctx->tx_transfer_ring_addr.base;
+	index = tx_uc_db_local;
+	for (i = 0; i < NUM_TX_BUFS; i++) {
+		tx_transfer = tx_transfer_base + index;
+		tx_transfer->buf_or_link_desc_addr_info.buffer_addr_low =
+			(u64)test_wdi3_ctx->tx_bufs[i].phys_base & 0xFFFFFFFF;
+		tx_transfer->buf_or_link_desc_addr_info.buffer_addr_high =
+			((u64)test_wdi3_ctx->tx_bufs[i].phys_base >> 32)
+			& 0xFFFFFFFF;
+		index = (index + 1) % NUM_TX_TR_ELE;
+	}
+	ipa_test_wdi3_advance_uc_db(&tx_uc_db_local, NUM_TX_BUFS,
+		sizeof(struct tx_transfer_ring_ele)/4,
+		test_wdi3_ctx->tx_transfer_ring_addr.size);
+	iowrite32(tx_uc_db_local, tx_uc_db);
+	iounmap(tx_uc_db);
+	kfree(in_param);
+	kfree(out_param);
+	return 0;
+}
+
+static int ipa_wdi3_teardown_pipes(void)
+{
+	ipa_wdi_disable_pipes();
+	ipa_wdi_disconn_pipes();
+	rx_bf_idx = 0;
+	tx_bf_idx = 0;
+	rx_uc_db_local = 0;
+	tx_uc_db_local = 0;
+	return 0;
+}
+
+static int ipa_wdi3_send_one_packet(void)
+{
+	void __iomem *rx_uc_db;
+	void __iomem *tx_uc_db;
+	u32 *tx_event_ring_db, *rx_transfer_ring_db, *rx_event_ring_db;
+	u32 orig_tx_event_ring_db;
+	u32 orig_rx_event_ring_db;
+	u32 orig_tx_trans_ring_db;
+	u32 *packet;
+	u32 *packet_recv = NULL;
+	struct rx_transfer_ring_ele *rx_transfer;
+	struct rx_event_ring_ele *rx_event;
+	struct tx_event_ring_ele *tx_event;
+	struct tx_transfer_ring_ele *tx_transfer;
+	struct buffer_addr_info rx_buf;
+	dma_addr_t recv_packet_addr;
+	int loop_cnt, i, num_words;
+	int idx;
+
+	/* populate packet content */
+	rx_uc_db = ioremap(test_wdi3_ctx->rx_uc_db_pa, DB_REGISTER_SIZE);
+	num_words = sizeof(struct rx_transfer_ring_ele) / 4;
+	idx = rx_uc_db_local / num_words;
+	packet = (u32 *)test_wdi3_ctx->rx_bufs[rx_bf_idx].base +
+		PACKET_HEADER_SIZE/4;
+	*packet = PACKET_CONTENT;
+	IPA_UT_DBG("local rx uc db: %u, rx buffer index %d\n",
+		rx_uc_db_local, rx_bf_idx);
+	rx_bf_idx = (rx_bf_idx  + 1) % NUM_RX_BUFS;
+	/* update rx_transfer_ring_ele */
+	rx_transfer = (struct rx_transfer_ring_ele *)
+		(test_wdi3_ctx->rx_transfer_ring_addr.base) +
+		idx;
+
+	ipa_test_wdi3_advance_uc_db(&rx_uc_db_local, 1,
+		sizeof(struct rx_transfer_ring_ele)/4,
+		test_wdi3_ctx->rx_transfer_ring_addr.size);
+	rx_transfer->rx_msdu_desc_info_details.msdu_length =
+		ETH_PACKET_SIZE + PACKET_HEADER_SIZE;
+
+	rx_buf.buffer_addr_low =
+		rx_transfer->buf_or_link_desc_addr_info.buffer_addr_low;
+	rx_buf.buffer_addr_high =
+		rx_transfer->buf_or_link_desc_addr_info.buffer_addr_high;
+
+	tx_event_ring_db = (u32 *)test_wdi3_ctx->tx_event_ring_db.base;
+	orig_tx_event_ring_db = *tx_event_ring_db;
+	IPA_UT_DBG("original tx event ring db: %u\n",
+		orig_tx_event_ring_db);
+
+	rx_event_ring_db = (u32 *)test_wdi3_ctx->rx_event_ring_db.base;
+	orig_rx_event_ring_db = *rx_event_ring_db;
+	IPA_UT_DBG("original rx event ring db: %u\n",
+		orig_rx_event_ring_db);
+
+	rx_transfer_ring_db
+		= (u32 *)test_wdi3_ctx->rx_transfer_ring_db.base;
+	orig_tx_trans_ring_db = *rx_transfer_ring_db;
+	IPA_UT_DBG("original rx transfer ring db: %u\n",
+		*rx_transfer_ring_db);
+
+	/* ring uc db */
+	iowrite32(rx_uc_db_local, rx_uc_db);
+	iounmap(rx_uc_db);
+	IPA_UT_DBG("rx db local: %u\n", rx_uc_db_local);
+
+	loop_cnt = 0;
+	while (orig_tx_event_ring_db == *tx_event_ring_db ||
+		*rx_event_ring_db == orig_rx_event_ring_db) {
+		loop_cnt++;
+		IPA_UT_DBG("loop count: %d tx\n", loop_cnt);
+		IPA_UT_DBG("orig_tx_event_ring_db: %u tx_event_ring_db: %u\n",
+			orig_tx_event_ring_db, *tx_event_ring_db);
+		IPA_UT_DBG("rx_transfer_ring_db: %u rx db local: %u\n",
+			*rx_transfer_ring_db, rx_uc_db_local);
+		IPA_UT_DBG("orig_rx_event_ring_db: %u rx_event_ring_db %u\n",
+			orig_rx_event_ring_db, *rx_event_ring_db);
+		if (loop_cnt == 1000) {
+			IPA_UT_ERR("transfer timeout!\n");
+			gsi_wdi3_dump_register(1);
+			gsi_wdi3_dump_register(9);
+			BUG();
+			return -EFAULT;
+		}
+		usleep_range(1000, 1001);
+	}
+	IPA_UT_DBG("rx_transfer_ring_db: %u\n", *rx_transfer_ring_db);
+	IPA_UT_DBG("tx_event_ring_db: %u\n", *tx_event_ring_db);
+	num_words = sizeof(struct rx_event_ring_ele)/4;
+	rx_event = (struct rx_event_ring_ele *)
+		(test_wdi3_ctx->rx_event_ring_addr.base) +
+		(*rx_event_ring_db/num_words - 1 + NUM_RX_ER_ELE) %
+		NUM_RX_ER_ELE;
+	IPA_UT_DBG("rx_event offset: %u\n",
+		(*rx_event_ring_db/num_words - 1 + NUM_RX_ER_ELE) %
+		NUM_RX_ER_ELE);
+	IPA_UT_DBG("rx_event va: %pK\n", rx_event);
+	IPA_UT_DBG("rx event low: %u rx event high: %u\n",
+		rx_event->buf_or_link_desc_addr_info.buffer_addr_low,
+		rx_event->buf_or_link_desc_addr_info.buffer_addr_high);
+	IPA_UT_DBG("rx buf low: %u rx buf high: %u\n",
+		rx_buf.buffer_addr_low, rx_buf.buffer_addr_high);
+	if (rx_event->buf_or_link_desc_addr_info.buffer_addr_low !=
+		rx_buf.buffer_addr_low ||
+		rx_event->buf_or_link_desc_addr_info.buffer_addr_high !=
+		rx_buf.buffer_addr_high) {
+		IPA_UT_ERR("rx event ring buf addr doesn't match.\n");
+		BUG();
+		return -EFAULT;
+	}
+
+	num_words = sizeof(struct tx_event_ring_ele)/4;
+	tx_event = (struct tx_event_ring_ele *)
+		test_wdi3_ctx->tx_event_ring_addr.base +
+		(*tx_event_ring_db/num_words - 1 + NUM_TX_ER_ELE) %
+		NUM_TX_ER_ELE;
+	IPA_UT_DBG("tx_event va: %pK\n", tx_event);
+	IPA_UT_DBG("tx event offset: %u\n",
+		(*tx_event_ring_db/num_words - 1 + NUM_TX_ER_ELE) %
+		NUM_TX_ER_ELE);
+	IPA_UT_DBG("recv addr low: %u recv_addr high: %u\n",
+		tx_event->buf_or_link_desc_addr_info.buffer_addr_low,
+		tx_event->buf_or_link_desc_addr_info.buffer_addr_high);
+	recv_packet_addr =
+		((u64)tx_event->buf_or_link_desc_addr_info.buffer_addr_high
+		 << 32) |
+		(u64)tx_event->buf_or_link_desc_addr_info.buffer_addr_low;
+	IPA_UT_DBG("high: %llu low: %llu all: %llu\n",
+		(u64)tx_event->buf_or_link_desc_addr_info.buffer_addr_high
+			   << 32,
+		(u64)tx_event->buf_or_link_desc_addr_info.buffer_addr_low,
+		recv_packet_addr);
+	for (i = 0; i < NUM_TX_BUFS; i++)
+		if (recv_packet_addr == test_wdi3_ctx->tx_bufs[i].phys_base) {
+			IPA_UT_DBG("found buf at position %d\n", i);
+			packet_recv = (u32 *)test_wdi3_ctx->tx_bufs[i].base;
+		}
+	IPA_UT_DBG("packet_recv addr: %pK\n", packet_recv);
+	if (*packet_recv != PACKET_CONTENT) {
+		IPA_UT_ERR("recv packet doesn't match.\n");
+		IPA_UT_ERR("packet: %d packet_recv: %d\n", PACKET_CONTENT,
+			*packet_recv);
+		return -EFAULT;
+	}
+	IPA_UT_INFO("recv packet matches!! Recycling the buffer ...\n");
+	/* recycle buffer */
+	tx_uc_db = ioremap(test_wdi3_ctx->tx_uc_db_pa, DB_REGISTER_SIZE);
+	num_words = sizeof(struct tx_transfer_ring_ele) / 4;
+	idx = tx_uc_db_local / num_words;
+	IPA_UT_DBG("tx_db_local: %u idx %d\n", tx_uc_db_local, idx);
+	tx_transfer = (struct tx_transfer_ring_ele *)
+		test_wdi3_ctx->tx_transfer_ring_addr.base + idx;
+	tx_transfer->buf_or_link_desc_addr_info.buffer_addr_low =
+		tx_event->buf_or_link_desc_addr_info.buffer_addr_low;
+	tx_transfer->buf_or_link_desc_addr_info.buffer_addr_high =
+		tx_event->buf_or_link_desc_addr_info.buffer_addr_high;
+	ipa_test_wdi3_advance_uc_db(&tx_uc_db_local, 1,
+		sizeof(struct tx_transfer_ring_ele)/4,
+		test_wdi3_ctx->tx_transfer_ring_addr.size);
+	iowrite32(tx_uc_db_local, tx_uc_db);
+	tx_bf_idx = (tx_bf_idx + 1) % NUM_TX_BUFS;
+	return 0;
+}
+
+static int ipa_wdi3_test_reg_intf(void)
+{
+	struct ipa_wdi_reg_intf_in_params in;
+	char netdev_name[IPA_RESOURCE_NAME_MAX] = {0};
+	u8 hdr_content = 1;
+
+	memset(&in, 0, sizeof(in));
+	snprintf(netdev_name, sizeof(netdev_name), "wdi3_test");
+	in.netdev_name = netdev_name;
+	in.is_meta_data_valid = 0;
+	in.hdr_info[0].hdr = &hdr_content;
+	in.hdr_info[0].hdr_len = 1;
+	in.hdr_info[0].dst_mac_addr_offset = 0;
+	in.hdr_info[0].hdr_type = IPA_HDR_L2_ETHERNET_II;
+	in.hdr_info[1].hdr = &hdr_content;
+	in.hdr_info[1].hdr_len = 1;
+	in.hdr_info[1].dst_mac_addr_offset = 0;
+	in.hdr_info[1].hdr_type = IPA_HDR_L2_ETHERNET_II;
+
+	return ipa_wdi_reg_intf(&in);
+}
+
+static int ipa_wdi3_test_dereg_intf(void)
+{
+	char netdev_name[IPA_RESOURCE_NAME_MAX] = {0};
+
+	snprintf(netdev_name, sizeof(netdev_name), "wdi3_test");
+	IPA_UT_INFO("netdev name: %s strlen: %lu\n", netdev_name,
+				strlen(netdev_name));
+
+	return ipa_wdi_dereg_intf(netdev_name);
+}
+
+static int ipa_wdi3_test_single_transfer(void *priv)
+{
+	struct ipa_ep_cfg ep_cfg = { {0} };
+
+	if (ipa_wdi3_test_reg_intf()) {
+		IPA_UT_ERR("fail to register intf.\n");
+		return -EFAULT;
+	}
+
+	if (ipa_wdi3_setup_pipes()) {
+		IPA_UT_ERR("fail to setup wdi3 pipes.\n");
+		return -EFAULT;
+	}
+
+	/* configure WLAN RX EP in DMA mode */
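+	/* DMA mode forwards traffic from WLAN2_PROD straight to
+	 * WLAN2_CONS, so a packet pushed on the rx ring is expected
+	 * to come back on the tx ring
+	 */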
+	ep_cfg.mode.mode = IPA_DMA;
+	ep_cfg.mode.dst = IPA_CLIENT_WLAN2_CONS;
+
+	ep_cfg.seq.set_dynamic = true;
+
+	ipa_cfg_ep(ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD), &ep_cfg);
+
+	if (ipa_wdi3_send_one_packet()) {
+		IPA_UT_ERR("fail to transfer packet.\n");
+		ipa_wdi3_teardown_pipes();
+		return -EFAULT;
+	}
+
+	if (ipa_wdi3_teardown_pipes()) {
+		IPA_UT_ERR("fail to tear down pipes.\n");
+		return -EFAULT;
+	}
+
+	IPA_UT_INFO("pipes were torn down!\n");
+
+	if (ipa_wdi3_test_dereg_intf()) {
+		IPA_UT_ERR("fail to deregister interface.\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int ipa_wdi3_send_multi_packet(void)
+{
+	void __iomem *rx_uc_db;
+	void __iomem *tx_uc_db;
+	u32 *tx_event_ring_db, *rx_transfer_ring_db, *rx_event_ring_db;
+	u32 orig_tx_event_ring_db;
+	u32 orig_rx_event_ring_db;
+	u32 *packet;
+	u32 *packet_recv = NULL;
+	struct rx_transfer_ring_ele *rx_transfer;
+	struct rx_event_ring_ele *rx_event;
+	struct tx_event_ring_ele *tx_event;
+	struct tx_transfer_ring_ele *tx_transfer;
+	struct buffer_addr_info rx_buf;
+	dma_addr_t recv_packet_addr;
+	int loop_cnt, i, num_words;
+	int idx;
+
+	/* populate packet content */
+	num_words = sizeof(struct rx_transfer_ring_ele) / 4;
+	rx_uc_db = ioremap(test_wdi3_ctx->rx_uc_db_pa, DB_REGISTER_SIZE);
+	for (i = 0; i < NUM_MULTI_PKT; i++) {
+		idx = rx_uc_db_local / num_words;
+		packet = (u32 *)test_wdi3_ctx->rx_bufs[rx_bf_idx].base
+			+ PACKET_HEADER_SIZE / 4;
+		*packet = multi_pkt_array[i];
+		IPA_UT_DBG("rx_db_local: %u rx_bf_idx: %d\n",
+			rx_uc_db_local, rx_bf_idx);
+		rx_bf_idx = (rx_bf_idx  + 1) % NUM_RX_BUFS;
+		/* update rx_transfer_ring_ele */
+		rx_transfer = (struct rx_transfer_ring_ele *)
+			test_wdi3_ctx->rx_transfer_ring_addr.base + idx;
+		ipa_test_wdi3_advance_uc_db(&rx_uc_db_local, 1,
+			sizeof(struct rx_transfer_ring_ele)/4,
+			test_wdi3_ctx->rx_transfer_ring_addr.size);
+		rx_transfer->rx_msdu_desc_info_details.msdu_length =
+			ETH_PACKET_SIZE + PACKET_HEADER_SIZE;
+		rx_buf.buffer_addr_low =
+		rx_transfer->buf_or_link_desc_addr_info.buffer_addr_low;
+		rx_buf.buffer_addr_high =
+		rx_transfer->buf_or_link_desc_addr_info.buffer_addr_high;
+	}
+
+	tx_event_ring_db = (u32 *)test_wdi3_ctx->tx_event_ring_db.base;
+	orig_tx_event_ring_db = *tx_event_ring_db;
+	IPA_UT_DBG("original tx event ring db: %u\n", orig_tx_event_ring_db);
+
+	rx_event_ring_db = (u32 *)test_wdi3_ctx->rx_event_ring_db.base;
+	orig_rx_event_ring_db = *rx_event_ring_db;
+	IPA_UT_DBG("original rx event ring db: %u\n", orig_rx_event_ring_db);
+
+	rx_transfer_ring_db = (u32 *)test_wdi3_ctx->rx_transfer_ring_db.base;
+	IPA_UT_DBG("original rx transfer ring db: %u\n", *rx_transfer_ring_db);
+
+	/* ring uc db */
+	iowrite32(rx_uc_db_local, rx_uc_db);
+	IPA_UT_DBG("rx db local: %u\n", rx_uc_db_local);
+
+	loop_cnt = 0;
+	while (orig_tx_event_ring_db == *tx_event_ring_db ||
+		*rx_transfer_ring_db != rx_uc_db_local ||
+		orig_rx_event_ring_db == *rx_event_ring_db) {
+		loop_cnt++;
+		IPA_UT_DBG("loop count: %d tx\n", loop_cnt);
+		IPA_UT_DBG("orig_tx_event_ring_db: %u tx_event_ring_db: %u\n",
+			orig_tx_event_ring_db, *tx_event_ring_db);
+		IPA_UT_DBG("rx_transfer_ring_db: %u rx db local: %u\n",
+			*rx_transfer_ring_db, rx_uc_db_local);
+		IPA_UT_DBG("orig_rx_event_ring_db: %u rx_event_ring_db %u\n",
+			orig_rx_event_ring_db, *rx_event_ring_db);
+		if (loop_cnt == 1000) {
+			IPA_UT_ERR("transfer timeout!\n");
+			BUG();
+			return -EFAULT;
+		}
+		usleep_range(1000, 1001);
+	}
+
+	IPA_UT_DBG("rx_transfer_ring_db: %u\n", *rx_transfer_ring_db);
+	IPA_UT_DBG("tx_event_ring_db: %u\n", *tx_event_ring_db);
+	num_words = sizeof(struct rx_event_ring_ele)/4;
+	rx_event = (struct rx_event_ring_ele *)
+		test_wdi3_ctx->rx_event_ring_addr.base +
+		(*rx_event_ring_db/num_words - 1 + NUM_RX_ER_ELE) %
+		NUM_RX_ER_ELE;
+	IPA_UT_DBG("rx_event va: %pK\n", rx_event);
+
+	IPA_UT_DBG("rx event low: %u rx event high: %u\n",
+		rx_event->buf_or_link_desc_addr_info.buffer_addr_low,
+		rx_event->buf_or_link_desc_addr_info.buffer_addr_high);
+	IPA_UT_DBG("rx buf low: %u rx buf high: %u\n",
+		rx_buf.buffer_addr_low, rx_buf.buffer_addr_high);
+
+	if (rx_event->buf_or_link_desc_addr_info.buffer_addr_low !=
+		rx_buf.buffer_addr_low ||
+		rx_event->buf_or_link_desc_addr_info.buffer_addr_high !=
+		rx_buf.buffer_addr_high) {
+		IPA_UT_ERR("rx event ring buf addr doesn't match.\n");
+		return -EFAULT;
+	}
+	num_words = sizeof(struct tx_event_ring_ele)/4;
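+	/* step back NUM_MULTI_PKT elements (mod ring size) to reach the
+	 * first completion of the batch that was just sent
+	 */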
+	tx_event = (struct tx_event_ring_ele *)
+		test_wdi3_ctx->tx_event_ring_addr.base +
+		(*tx_event_ring_db/num_words - NUM_MULTI_PKT + NUM_TX_ER_ELE) %
+		NUM_TX_ER_ELE;
+	IPA_UT_DBG("tx_event va: %pK\n", tx_event);
+	IPA_UT_DBG("recv addr low: %u recv_addr high: %u\n",
+		tx_event->buf_or_link_desc_addr_info.buffer_addr_low,
+		tx_event->buf_or_link_desc_addr_info.buffer_addr_high);
+	recv_packet_addr =
+		((u64)tx_event->buf_or_link_desc_addr_info.buffer_addr_high
+		 << 32) |
+		(u64)tx_event->buf_or_link_desc_addr_info.buffer_addr_low;
+	IPA_UT_DBG("high: %llu low: %llu all: %llu\n",
+		(u64)tx_event->buf_or_link_desc_addr_info.buffer_addr_high
+			   << 32,
+		(u64)tx_event->buf_or_link_desc_addr_info.buffer_addr_low,
+		recv_packet_addr);
+	for (i = 0; i < NUM_TX_BUFS; i++)
+		if (recv_packet_addr == test_wdi3_ctx->tx_bufs[i].phys_base) {
+			IPA_UT_INFO("found buf at position %d\n", i);
+			packet_recv = (u32 *)test_wdi3_ctx->tx_bufs[i].base;
+		}
+
+	if (!packet_recv) {
+		IPA_UT_ERR("no tx buffer matches received addr\n");
+		return -EFAULT;
+	}
+	if (*packet_recv != multi_pkt_array[0]) {
+		IPA_UT_ERR("recv packet doesn't match.\n");
+		IPA_UT_ERR("packet: %d packet_recv: %d\n",
+			multi_pkt_array[0], *packet_recv);
+		return -EFAULT;
+	}
+
+	IPA_UT_INFO("recv packet matches.\n");
+
+	/* recycle buffer */
+	tx_uc_db = ioremap(test_wdi3_ctx->tx_uc_db_pa, DB_REGISTER_SIZE);
+	num_words = sizeof(struct tx_transfer_ring_ele) / 4;
+
+	for (i = 0; i < NUM_MULTI_PKT; i++) {
+		idx = tx_uc_db_local / num_words;
+		IPA_UT_DBG("tx_db_local: %u idx %d\n", tx_uc_db_local, idx);
+		tx_event = (struct tx_event_ring_ele *)
+			test_wdi3_ctx->tx_event_ring_addr.base +
+			(*tx_event_ring_db/num_words - NUM_MULTI_PKT
+			+ i + NUM_TX_ER_ELE) % NUM_TX_ER_ELE;
+		tx_transfer = (struct tx_transfer_ring_ele *)
+			test_wdi3_ctx->tx_transfer_ring_addr.base + idx;
+		tx_transfer->buf_or_link_desc_addr_info.buffer_addr_low =
+			tx_event->buf_or_link_desc_addr_info.buffer_addr_low;
+		tx_transfer->buf_or_link_desc_addr_info.buffer_addr_high =
+			tx_event->buf_or_link_desc_addr_info.buffer_addr_high;
+		ipa_test_wdi3_advance_uc_db(&tx_uc_db_local, 1,
+			sizeof(struct tx_transfer_ring_ele)/4,
+			test_wdi3_ctx->tx_transfer_ring_addr.size);
+	}
+	iowrite32(tx_uc_db_local, tx_uc_db);
+	tx_bf_idx = (tx_bf_idx + NUM_MULTI_PKT) % NUM_TX_BUFS;
+	return 0;
+}
+
+static int ipa_wdi3_test_multi_transfer(void *priv)
+{
+	struct ipa_ep_cfg ep_cfg = { {0} };
+
+	if (ipa_wdi3_test_reg_intf()) {
+		IPA_UT_ERR("fail to register intf.\n");
+		return -EFAULT;
+	}
+
+	if (ipa_wdi3_setup_pipes()) {
+		IPA_UT_ERR("fail to setup wdi3 pipes.\n");
+		return -EFAULT;
+	}
+
+	/* configure WLAN RX EP in DMA mode */
+	ep_cfg.mode.mode = IPA_DMA;
+	ep_cfg.mode.dst = IPA_CLIENT_WLAN2_CONS;
+
+	ep_cfg.seq.set_dynamic = true;
+
+	ipa_cfg_ep(ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD), &ep_cfg);
+
+	if (ipa_wdi3_send_multi_packet()) {
+		IPA_UT_ERR("fail to transfer packet.\n");
+		ipa_wdi3_teardown_pipes();
+		return -EFAULT;
+	}
+
+	if (ipa_wdi3_teardown_pipes()) {
+		IPA_UT_ERR("fail to tear down pipes.\n");
+		return -EFAULT;
+	}
+
+	IPA_UT_INFO("pipes were torn down!\n");
+
+	if (ipa_wdi3_test_dereg_intf()) {
+		IPA_UT_ERR("fail to deregister interface.\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int ipa_wdi3_test_multi_transfer2(void *priv)
+{
+	struct ipa_ep_cfg ep_cfg = { {0} };
+	int i;
+
+	if (ipa_wdi3_test_reg_intf()) {
+		IPA_UT_ERR("fail to register intf.\n");
+		return -EFAULT;
+	}
+
+	if (ipa_wdi3_setup_pipes()) {
+		IPA_UT_ERR("fail to setup wdi3 pipes.\n");
+		return -EFAULT;
+	}
+
+	/* configure WLAN RX EP in DMA mode */
+	ep_cfg.mode.mode = IPA_DMA;
+	ep_cfg.mode.dst = IPA_CLIENT_WLAN2_CONS;
+
+	ep_cfg.seq.set_dynamic = true;
+
+	ipa_cfg_ep(ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD), &ep_cfg);
+
+	IPA_UT_DBG("-----start transfer 32 pkt----\n");
+	for (i = 0; i < 32; i++) {
+		IPA_UT_DBG("--transferring num #%d pkt--\n", i + 1);
+		if (ipa_wdi3_send_one_packet()) {
+			IPA_UT_ERR("fail to transfer packet.\n");
+			ipa_wdi3_teardown_pipes();
+			return -EFAULT;
+		}
+	}
+
+	if (ipa_wdi3_teardown_pipes()) {
+		IPA_UT_ERR("fail to tear down pipes.\n");
+		return -EFAULT;
+	}
+
+	IPA_UT_ERR("pipes were torn down!\n");
+
+	if (ipa_wdi3_test_dereg_intf()) {
+		IPA_UT_ERR("fail to deregister interface.\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int ipa_wdi3_test_multi_transfer3(void *priv)
+{
+	struct ipa_ep_cfg ep_cfg = { {0} };
+	int i;
+
+	if (ipa_wdi3_test_reg_intf()) {
+		IPA_UT_ERR("fail to register intf.\n");
+		return -EFAULT;
+	}
+
+	if (ipa_wdi3_setup_pipes()) {
+		IPA_UT_ERR("fail to setup wdi3 pipes.\n");
+		return -EFAULT;
+	}
+
+	/* configure WLAN RX EP in DMA mode */
+	ep_cfg.mode.mode = IPA_DMA;
+	ep_cfg.mode.dst = IPA_CLIENT_WLAN2_CONS;
+
+	ep_cfg.seq.set_dynamic = true;
+
+	ipa_cfg_ep(ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD), &ep_cfg);
+
+	IPA_UT_DBG("-----start transfer 256 pkt----\n");
+	for (i = 0; i < 32; i++) {
+		IPA_UT_DBG("--transferring num # %d to num # %d pkt--\n",
+			(i + 1) * 8, (i + 2) * 8 - 1);
+		if (ipa_wdi3_send_multi_packet()) {
+			IPA_UT_ERR("fail to transfer packet.\n");
+			ipa_wdi3_teardown_pipes();
+			return -EFAULT;
+		}
+	}
+
+	if (ipa_wdi3_teardown_pipes()) {
+		IPA_UT_ERR("fail to tear down pipes.\n");
+		return -EFAULT;
+	}
+
+	IPA_UT_ERR("pipes were torn down!\n");
+
+	if (ipa_wdi3_test_dereg_intf()) {
+		IPA_UT_ERR("fail to deregister interface.\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+
+/* Suite definition block */
+IPA_UT_DEFINE_SUITE_START(wdi3, "WDI3 tests",
+	ipa_test_wdi3_suite_setup, ipa_test_wdi3_suite_teardown)
+{
+	IPA_UT_ADD_TEST(single_transfer,
+		"single data transfer",
+		ipa_wdi3_test_single_transfer,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(multi_transfer,
+		"multiple data transfer",
+		ipa_wdi3_test_multi_transfer,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(multi_transfer2,
+		"multiple data transfer with data wrap around",
+		ipa_wdi3_test_multi_transfer2,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(multi_transfer3,
+		"multiple data transfer with data wrap around2",
+		ipa_wdi3_test_multi_transfer3,
+		true, IPA_HW_v3_0, IPA_HW_MAX)
+} IPA_UT_DEFINE_SUITE_END(wdi3);
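+
+/*
+ * Note: once registered in ipa_ut_suite_list.h, this suite is exercised
+ * through the ipa_ut debugfs framework (typically one debugfs node per
+ * test under a per-suite directory; the exact layout depends on the
+ * framework and is mentioned here only for orientation).
+ */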
+
+
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
index b452da8..0b72c48 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
+++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _IPA_UT_SUITE_LIST_H_
@@ -18,6 +18,7 @@
 IPA_UT_DECLARE_SUITE(pm);
 IPA_UT_DECLARE_SUITE(example);
 IPA_UT_DECLARE_SUITE(hw_stats);
+IPA_UT_DECLARE_SUITE(wdi3);
 
 
 /**
@@ -31,6 +32,7 @@
 	IPA_UT_REGISTER_SUITE(pm),
 	IPA_UT_REGISTER_SUITE(example),
 	IPA_UT_REGISTER_SUITE(hw_stats),
+	IPA_UT_REGISTER_SUITE(wdi3),
 } IPA_UT_DEFINE_ALL_SUITES_END;
 
 #endif /* _IPA_UT_SUITE_LIST_H_ */
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 0612124..2403bfa 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -510,6 +510,19 @@
 	  from logical nodes to hardware nodes controlled by the BCM (Bus
 	  Clock Manager)
 
+config MSM_SPCOM
+	depends on QCOM_GLINK
+	bool "Secure Processor Communication over GLINK"
+	help
+	  The spcom driver allows loading Secure Processor applications and
+	  sending messages to them.
+	  spcom provides an interface to both user space apps and kernel
+	  drivers. It uses glink as the transport layer, which provides
+	  multiple logical channels over a single physical channel.
+	  The physical layer is based on shared memory and interrupts.
+	  spcom provides a client/server API, although currently only one
+	  client or server is allowed per logical channel.
+
 config QSEE_IPC_IRQ
 	bool "QSEE interrupt manager"
 	help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 4b966ed..2d144fea 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -44,6 +44,7 @@
 obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
 obj-$(CONFIG_MEM_SHARE_QMI_SERVICE)		+= memshare/
 obj-$(CONFIG_MSM_PIL)   +=      peripheral-loader.o
+obj-$(CONFIG_MSM_SPCOM) += spcom.o
 obj-$(CONFIG_MSM_CDSP_LOADER) += qdsp6v2/
 obj-$(CONFIG_MSM_JTAGV8) += jtagv8.o jtagv8-etm.o
 
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
new file mode 100644
index 0000000..c6987f3
--- /dev/null
+++ b/drivers/soc/qcom/spcom.c
@@ -0,0 +1,2200 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Secure-Processor-Communication (SPCOM).
+ *
+ * This driver provides communication to Secure Processor (SP)
+ * over RPMSG framework.
+ *
+ * It provides an interface to the userspace spcomlib.
+ *
+ * Userspace application shall use spcomlib for communication with SP. Userspace
+ * application can be either client or server. spcomlib shall use write() file
+ * operation to send data, and read() file operation to read data.
+ *
+ * This driver uses RPMSG with glink-spss as a transport layer.
+ * This driver exposes "/dev/<sp-channel-name>" file node for each rpmsg logical
+ * channel.
+ * This driver exposes "/dev/spcom" file node for some debug/control command.
+ * The predefined channel "/dev/sp_kernel" is used for loading SP application
+ * from HLOS.
+ * This driver exposes "/dev/sp_ssr" file node to allow user space poll for SSR.
+ * After the remote SP App is loaded, this driver exposes a new file node
+ * "/dev/<ch-name>" for the matching HLOS App to use.
+ * The access to predefined file nodes and dynamically allocated file nodes is
+ * restricted by using unix group and SELinux.
+ *
+ * No message routing is used; instead, the rpmsg/G-Link "multiplexing"
+ * feature provides a dedicated logical channel per HLOS/SP application pair.
+ *
+ * Each HLOS/SP application can be either client or server or both.
+ * Messaging is always point-to-point between 2 HLOS<=>SP applications.
+ * Each channel is used exclusively by a single client or server.
+ *
+ * User Space Request & Response are synchronous.
+ * read() & write() operations are blocking until completed or terminated.
+ */
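+
+/*
+ * Illustrative user space flow (a minimal sketch; real applications go
+ * through spcomlib, and the channel name used below is hypothetical):
+ *
+ *	int fd = open("/dev/my_sp_app", O_RDWR);
+ *	write(fd, request, request_size);	// send request to the SP app
+ *	read(fd, response, sizeof(response));	// block until the response
+ *
+ * A server instead first reads the size of the next incoming request
+ * (see SPCOM_GET_NEXT_REQUEST_SIZE), reads the request itself, and then
+ * writes back a response carrying the same transaction id.
+ */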
+#define pr_fmt(fmt)	KBUILD_MODNAME ": %s: " fmt, __func__
+
+#include <linux/kernel.h>	/* min()             */
+#include <linux/module.h>	/* MODULE_LICENSE    */
+#include <linux/device.h>	/* class_create()    */
+#include <linux/slab.h>	        /* kzalloc()         */
+#include <linux/fs.h>		/* file_operations   */
+#include <linux/cdev.h>	        /* cdev_add()        */
+#include <linux/errno.h>	/* EINVAL, ETIMEDOUT */
+#include <linux/printk.h>	/* pr_err()          */
+#include <linux/bitops.h>	/* BIT(x)            */
+#include <linux/completion.h>	/* wait_for_completion_timeout() */
+#include <linux/poll.h>	/* POLLOUT */
+#include <linux/platform_device.h>
+#include <linux/of.h>		/* of_property_count_strings() */
+#include <linux/workqueue.h>
+#include <linux/delay.h>	/* msleep() */
+#include <linux/dma-buf.h>
+#include <linux/limits.h>
+#include <linux/rpmsg.h>
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <uapi/linux/spcom.h>
+#include <soc/qcom/subsystem_restart.h>
+
+/**
+ * Request buffer size.
+ * Any large data (a multiple of 4KB) is provided by a temp buffer in DDR.
+ * The request shall provide the temp buffer physical address (aligned to 4KB).
+ * Maximum request/response size of 268 is used to accommodate APDU size.
+ * From kernel spcom driver perspective a PAGE_SIZE of 4K
+ * is the actual maximum size for a single read/write file operation.
+ */
+#define SPCOM_MAX_RESPONSE_SIZE		268
+
+/* SPCOM driver name */
+#define DEVICE_NAME	"spcom"
+
+/* maximum ION buffers should be >= SPCOM_MAX_CHANNELS  */
+#define SPCOM_MAX_ION_BUF_PER_CH (SPCOM_MAX_CHANNELS + 4)
+
+/* maximum ION buffer per send request/response command */
+#define SPCOM_MAX_ION_BUF_PER_CMD SPCOM_MAX_ION_BUF
+
+/* Maximum command size */
+#define SPCOM_MAX_COMMAND_SIZE	(PAGE_SIZE)
+
+/* Maximum input size */
+#define SPCOM_MAX_READ_SIZE	(PAGE_SIZE)
+
+/* Current Process ID */
+#define current_pid() ((u32)(current->pid))
+
+/*
+ * After both sides get CONNECTED,
+ * there is a race between one side queueing rx buffer and the other side
+ * trying to call glink_tx() , this race is only on the 1st tx.
+ * Do tx retry with some delay to allow the other side to queue rx buffer.
+ */
+#define TX_RETRY_DELAY_MSEC	100
+
+/* SPCOM_MAX_REQUEST_SIZE or SPCOM_MAX_RESPONSE_SIZE, plus header */
+#define SPCOM_RX_BUF_SIZE	300
+
+/*
+ * Initial transaction id, use non-zero nonce for debug.
+ * Incremented by client on request, and copied back by server on response.
+ */
+#define INITIAL_TXN_ID	0x12345678
+
+/**
+ * struct spcom_msg_hdr - Request/Response message header between HLOS and SP.
+ *
+ * This header precedes any request-specific parameters.
+ * The transaction id is used to match request with response.
+ * Note: rpmsg API provides the rx/tx data size, so user payload size is
+ * calculated by reducing the header size.
+ */
+struct spcom_msg_hdr {
+	uint32_t reserved;	/* for future use */
+	uint32_t txn_id;	/* transaction id */
+	char buf[0];		/* Variable buffer size, must be last field */
+} __packed;
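+
+/* e.g., for a received packet:
+ *	user payload size = actual_rx_size - sizeof(struct spcom_msg_hdr)
+ */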
+
+/**
+ * struct spcom_client - Client handle
+ */
+struct spcom_client {
+	struct spcom_channel *ch;
+};
+
+/**
+ * struct spcom_server - Server handle
+ */
+struct spcom_server {
+	struct spcom_channel *ch;
+};
+
+/**
+ * struct spcom_channel - channel context
+ */
+struct spcom_channel {
+	char name[SPCOM_CHANNEL_NAME_SIZE];
+	struct mutex lock;
+	uint32_t txn_id;           /* incrementing nonce per client request */
+	bool is_server;            /* for txn_id and response_timeout_msec  */
+	bool comm_role_undefined;  /* is true on channel creation, before   */
+				   /* first tx/rx on channel                */
+	uint32_t response_timeout_msec; /* for client only */
+
+	/* char dev */
+	struct cdev *cdev;
+	struct device *dev;
+	struct device_attribute attr;
+
+	/* rpmsg */
+	struct rpmsg_driver *rpdrv;
+	struct rpmsg_device *rpdev;
+
+	/* Events notification */
+	struct completion rx_done;
+	struct completion connect;
+
+	/*
+	 * Only one client or server per channel.
+	 * Only one rx/tx transaction at a time (request + response).
+	 */
+	bool is_busy;
+
+	u32 pid; /* debug only to find user space application */
+
+	/* abort flags */
+	bool rpmsg_abort;
+
+	/* rx data info */
+	size_t actual_rx_size;	/* actual data size received */
+	void *rpmsg_rx_buf;
+
+	/* shared buffer lock/unlock support */
+	int dmabuf_fd_table[SPCOM_MAX_ION_BUF_PER_CH];
+	struct dma_buf *dmabuf_handle_table[SPCOM_MAX_ION_BUF_PER_CH];
+};
+
+/**
+ * struct rx_buff_list - holds rx rpmsg data until it is consumed by the
+ * spcom_signal_rx_done worker; one item per rx packet
+ */
+struct rx_buff_list {
+	struct list_head list;
+
+	void *rpmsg_rx_buf;
+	int   rx_buf_size;
+	struct spcom_channel *ch;
+};
+
+/**
+ * struct spcom_device - device state structure.
+ */
+struct spcom_device {
+	char predefined_ch_name[SPCOM_MAX_CHANNELS][SPCOM_CHANNEL_NAME_SIZE];
+
+	/* char device info */
+	struct cdev cdev;
+	dev_t device_no;
+	struct class *driver_class;
+	struct device *class_dev;
+	struct platform_device *pdev;
+
+	/* rpmsg channels */
+	struct spcom_channel channels[SPCOM_MAX_CHANNELS];
+	atomic_t chdev_count;
+
+	struct completion rpmsg_state_change;
+	atomic_t rpmsg_dev_count;
+
+	/* rx data path */
+	struct list_head    rx_list_head;
+	spinlock_t          rx_lock;
+};
+
+/* Device Driver State */
+static struct spcom_device *spcom_dev;
+
+/* static functions declaration */
+static int spcom_create_channel_chardev(const char *name);
+static struct spcom_channel *spcom_find_channel_by_name(const char *name);
+static int spcom_register_rpmsg_drv(struct spcom_channel *ch);
+static int spcom_unregister_rpmsg_drv(struct spcom_channel *ch);
+
+/**
+ * spcom_is_channel_open() - channel is open on this side.
+ *
+ * The channel is open once its rpmsg driver is registered; it becomes
+ * fully connected only after the matching rpmsg device is probed.
+ */
+static inline bool spcom_is_channel_open(struct spcom_channel *ch)
+{
+	return ch->rpdrv != NULL;
+}
+
+/**
+ * spcom_is_channel_connected() - channel is fully connected by both sides.
+ */
+static inline bool spcom_is_channel_connected(struct spcom_channel *ch)
+{
+	/* Channel must be open before it gets connected */
+	if (!spcom_is_channel_open(ch))
+		return false;
+
+	return ch->rpdev != NULL;
+}
+
+/**
+ * spcom_create_predefined_channels_chardev() - expose predefined channels to
+ * user space.
+ *
+ * The predefined channels list is provided by the device tree. Typically,
+ * these are known servers on the remote side that are not loaded by the HLOS.
+ */
+static int spcom_create_predefined_channels_chardev(void)
+{
+	int i;
+	int ret;
+	static bool is_predefined_created;
+
+	if (is_predefined_created)
+		return 0;
+
+	for (i = 0; i < SPCOM_MAX_CHANNELS; i++) {
+		const char *name = spcom_dev->predefined_ch_name[i];
+
+		if (name[0] == 0)
+			break;
+		ret = spcom_create_channel_chardev(name);
+		if (ret) {
+			pr_err("failed to create chardev [%s], ret [%d]\n",
+			       name, ret);
+			return -EFAULT;
+		}
+	}
+
+	is_predefined_created = true;
+
+	return 0;
+}
+
+/*======================================================================*/
+/*		UTILITIES						*/
+/*======================================================================*/
+
+/**
+ * spcom_init_channel() - initialize channel state.
+ *
+ * @ch: channel state struct pointer
+ * @name: channel name
+ */
+static int spcom_init_channel(struct spcom_channel *ch, const char *name)
+{
+	if (!ch || !name || !name[0]) {
+		pr_err("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	strlcpy(ch->name, name, SPCOM_CHANNEL_NAME_SIZE);
+
+	init_completion(&ch->rx_done);
+	init_completion(&ch->connect);
+
+	mutex_init(&ch->lock);
+	ch->rpdrv = NULL;
+	ch->rpdev = NULL;
+	ch->actual_rx_size = 0;
+	ch->is_busy = false;
+	ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
+	ch->pid = 0;
+	ch->rpmsg_abort = false;
+	ch->rpmsg_rx_buf = NULL;
+	ch->comm_role_undefined = true;
+
+	return 0;
+}
+
+/**
+ * spcom_find_channel_by_name() - find a channel by name.
+ *
+ * @name: channel name
+ *
+ * Return: a channel state struct.
+ */
+static struct spcom_channel *spcom_find_channel_by_name(const char *name)
+{
+	int i;
+
+	for (i = 0 ; i < ARRAY_SIZE(spcom_dev->channels); i++) {
+		struct spcom_channel *ch = &spcom_dev->channels[i];
+
+		if (strcmp(ch->name, name) == 0)
+			return ch;
+	}
+
+	return NULL;
+}
+
+/**
+ * spcom_rx() - wait for received data until timeout, unless pending rx data is
+ *              already ready
+ *
+ * @ch: channel state struct pointer
+ * @buf: buffer pointer
+ * @size: buffer size
+ *
+ * Return: size in bytes on success, negative value on failure.
+ */
+static int spcom_rx(struct spcom_channel *ch,
+		     void *buf,
+		     uint32_t size,
+		     uint32_t timeout_msec)
+{
+	unsigned long jiffies = msecs_to_jiffies(timeout_msec);
+	long timeleft = 1;
+	int ret = 0;
+
+	mutex_lock(&ch->lock);
+
+	/* check for already pending data */
+	if (!ch->actual_rx_size) {
+		reinit_completion(&ch->rx_done);
+
+		mutex_unlock(&ch->lock); /* unlock while waiting */
+		/* wait for rx response */
+		pr_debug("wait for rx done, timeout_msec=%d\n", timeout_msec);
+		if (timeout_msec)
+			timeleft = wait_for_completion_interruptible_timeout(
+						     &ch->rx_done, jiffies);
+		else
+			ret = wait_for_completion_interruptible(&ch->rx_done);
+
+		mutex_lock(&ch->lock);
+		if (timeout_msec && timeleft == 0) {
+			ch->txn_id++; /* to drop expired rx packet later */
+			pr_err("rx_done timeout expired %d ms, set txn_id=%d\n",
+			       timeout_msec, ch->txn_id);
+			ret = -ETIMEDOUT;
+			goto exit_err;
+		} else if (ch->rpmsg_abort) {
+			pr_warn("rpmsg channel is closing\n");
+			ret = -ERESTART;
+			goto exit_err;
+		} else if (ret < 0 || timeleft == -ERESTARTSYS) {
+			pr_debug("wait interrupted: ret=%d, timeleft=%ld\n",
+				 ret, timeleft);
+			if (timeleft == -ERESTARTSYS)
+				ret = -ERESTARTSYS;
+			goto exit_err;
+		} else if (ch->actual_rx_size) {
+			pr_debug("actual_rx_size is [%zu], txn_id %d\n",
+				 ch->actual_rx_size, ch->txn_id);
+		} else {
+			pr_err("actual_rx_size is zero\n");
+			ret = -EFAULT;
+			goto exit_err;
+		}
+	} else {
+		pr_debug("pending data size [%zu], requested size [%zu], ch->txn_id %d\n",
+			 ch->actual_rx_size, size, ch->txn_id);
+	}
+	if (!ch->rpmsg_rx_buf) {
+		pr_err("invalid rpmsg_rx_buf\n");
+		ret = -ENOMEM;
+		goto exit_err;
+	}
+
+	size = min_t(size_t, ch->actual_rx_size, size);
+	memcpy(buf, ch->rpmsg_rx_buf, size);
+
+	pr_debug("copy size [%d]\n", (int) size);
+
+	memset(ch->rpmsg_rx_buf, 0, ch->actual_rx_size);
+	kfree((void *)ch->rpmsg_rx_buf);
+	ch->rpmsg_rx_buf = NULL;
+	ch->actual_rx_size = 0;
+
+	mutex_unlock(&ch->lock);
+
+	return size;
+exit_err:
+	mutex_unlock(&ch->lock);
+	return ret;
+}
+
+/**
+ * spcom_get_next_request_size() - get the size of the next request.
+ *
+ * @ch: channel state struct pointer
+ *
+ * Server needs the size of the next request to allocate a request buffer.
+ * Initially an intent-request was used; however, this complicated the remote
+ * side, so neither side uses glink_tx() with INTENT_REQ anymore.
+ *
+ * Return: size in bytes on success, negative value on failure.
+ */
+static int spcom_get_next_request_size(struct spcom_channel *ch)
+{
+	int size = -1;
+	int ret = 0;
+
+	/* NOTE: Remote clients might not be connected yet. */
+	mutex_lock(&ch->lock);
+	reinit_completion(&ch->rx_done);
+
+	/* check if already got it via callback */
+	if (ch->actual_rx_size) {
+		pr_debug("next-req-size already ready ch [%s] size [%zu]\n",
+			 ch->name, ch->actual_rx_size);
+		ret = -EFAULT;
+		goto exit_ready;
+	}
+	mutex_unlock(&ch->lock); /* unlock while waiting */
+
+	pr_debug("Wait for Rx Done, ch [%s]\n", ch->name);
+	ret = wait_for_completion_interruptible(&ch->rx_done);
+	if (ret < 0) {
+		pr_debug("ch [%s]:interrupted wait ret=%d\n",
+			 ch->name, ret);
+		goto exit_error;
+	}
+
+	mutex_lock(&ch->lock); /* re-lock after waiting */
+
+	if (ch->actual_rx_size == 0) {
+		pr_err("invalid rx size [%zu] ch [%s]\n",
+		       ch->actual_rx_size, ch->name);
+		mutex_unlock(&ch->lock);
+		ret = -EFAULT;
+		goto exit_error;
+	}
+
+exit_ready:
+	/* actual_rx_size does not exceed SPCOM_RX_BUF_SIZE */
+	size = (int)ch->actual_rx_size;
+	if (size > sizeof(struct spcom_msg_hdr)) {
+		size -= sizeof(struct spcom_msg_hdr);
+	} else {
+		pr_err("rx size [%d] too small\n", size);
+		ret = -EFAULT;
+		mutex_unlock(&ch->lock);
+		goto exit_error;
+	}
+
+	mutex_unlock(&ch->lock);
+	return size;
+
+exit_error:
+	return ret;
+}
+
+/*======================================================================*/
+/*	USER SPACE commands handling					*/
+/*======================================================================*/
+
+/**
+ * spcom_handle_create_channel_command() - Handle Create Channel command from
+ * user space.
+ *
+ * @cmd_buf:	command buffer.
+ * @cmd_size:	command buffer size.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_create_channel_command(void *cmd_buf, int cmd_size)
+{
+	int ret = 0;
+	struct spcom_user_create_channel_command *cmd = cmd_buf;
+	const char *ch_name;
+	const size_t maxlen = sizeof(cmd->ch_name);
+
+	if (cmd_size != sizeof(*cmd)) {
+		pr_err("cmd_size [%d] , expected [%d]\n",
+		       (int) cmd_size,  (int) sizeof(*cmd));
+		return -EINVAL;
+	}
+
+	ch_name = cmd->ch_name;
+	if (strnlen(cmd->ch_name, maxlen) == maxlen) {
+		pr_err("channel name is not NULL terminated\n");
+		return -EINVAL;
+	}
+
+	pr_debug("ch_name [%s]\n", ch_name);
+
+	ret = spcom_create_channel_chardev(ch_name);
+
+	return ret;
+}
+
+/**
+ * spcom_handle_restart_sp_command() - Handle Restart SP command from
+ * user space.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_restart_sp_command(void)
+{
+	void *subsystem_get_retval = NULL;
+
+	pr_debug("restart - PIL FW loading process initiated\n");
+
+	subsystem_get_retval = subsystem_get("spss");
+	if (!subsystem_get_retval) {
+		pr_err("restart - unable to trigger PIL process for FW loading\n");
+		return -EINVAL;
+	}
+
+	pr_debug("restart - PIL FW loading process is complete\n");
+	return 0;
+}
+
+/**
+ * spcom_handle_send_command() - Handle send request/response from user space.
+ *
+ * @buf:	command buffer.
+ * @buf_size:	command buffer size.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_send_command(struct spcom_channel *ch,
+					     void *cmd_buf, int size)
+{
+	int ret = 0;
+	struct spcom_send_command *cmd = cmd_buf;
+	uint32_t buf_size;
+	void *buf;
+	struct spcom_msg_hdr *hdr;
+	void *tx_buf;
+	int tx_buf_size;
+	uint32_t timeout_msec;
+	int time_msec = 0;
+
+	pr_debug("send req/resp ch [%s] size [%d]\n", ch->name, size);
+
+	/*
+	 * check that cmd buf size is at least struct size,
+	 * to allow access to struct fields.
+	 */
+	if (size < sizeof(*cmd)) {
+		pr_err("ch [%s] invalid cmd buf\n",
+			ch->name);
+		return -EINVAL;
+	}
+
+	/* Check if remote side is connected */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connected\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	/* parse command buffer */
+	buf = &cmd->buf;
+	buf_size = cmd->buf_size;
+	timeout_msec = cmd->timeout_msec;
+
+	/* Check param validity */
+	if (buf_size > SPCOM_MAX_RESPONSE_SIZE) {
+		pr_err("ch [%s] invalid buf size [%d]\n",
+			ch->name, buf_size);
+		return -EINVAL;
+	}
+	if (size != sizeof(*cmd) + buf_size) {
+		pr_err("ch [%s] invalid cmd size [%d]\n",
+			ch->name, size);
+		return -EINVAL;
+	}
+
+	/* Allocate Buffers*/
+	tx_buf_size = sizeof(*hdr) + buf_size;
+	tx_buf = kzalloc(tx_buf_size, GFP_KERNEL);
+	if (!tx_buf)
+		return -ENOMEM;
+
+	/* Prepare Tx Buf */
+	hdr = tx_buf;
+
+	mutex_lock(&ch->lock);
+	if (ch->comm_role_undefined) {
+		pr_debug("ch [%s] send first -> it is client\n", ch->name);
+		ch->comm_role_undefined = false;
+		ch->is_server = false;
+	}
+
+	if (!ch->is_server) {
+		ch->txn_id++;   /* client sets the request txn_id */
+		ch->response_timeout_msec = timeout_msec;
+	}
+	hdr->txn_id = ch->txn_id;
+
+	/* user buf */
+	memcpy(hdr->buf, buf, buf_size);
+
+	time_msec = 0;
+	do {
+		if (ch->rpmsg_abort) {
+			pr_err("ch [%s] aborted\n", ch->name);
+			ret = -ECANCELED;
+			break;
+		}
+		/* may fail when RX intent not queued by SP */
+		ret = rpmsg_trysend(ch->rpdev->ept, tx_buf, tx_buf_size);
+		if (ret == 0)
+			break;
+		time_msec += TX_RETRY_DELAY_MSEC;
+		mutex_unlock(&ch->lock);
+		msleep(TX_RETRY_DELAY_MSEC);
+		mutex_lock(&ch->lock);
+	} while ((ret == -EBUSY || ret == -EAGAIN) && time_msec < timeout_msec);
+	if (ret)
+		pr_err("ch [%s] rpmsg_trysend() error (%d), timeout_msec=%d\n",
+		       ch->name, ret, timeout_msec);
+	mutex_unlock(&ch->lock);
+
+	kfree(tx_buf);
+	return ret;
+}
+
+/**
+ * modify_ion_addr() - replace the ION buffer virtual address with physical
+ * address in a request or response buffer.
+ *
+ * @buf: buffer to modify
+ * @buf_size: buffer size
+ * @ion_info: ION buffer info such as FD and offset in buffer.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int modify_ion_addr(void *buf,
+			    uint32_t buf_size,
+			    struct spcom_ion_info ion_info)
+{
+	struct dma_buf *dma_buf;
+	struct dma_buf_attachment *attach;
+	struct sg_table *sg = NULL;
+	dma_addr_t phy_addr = 0;
+	int fd, ret = 0;
+	uint32_t buf_offset;
+	char *ptr = (char *)buf;
+
+	fd = ion_info.fd;
+	buf_offset = ion_info.buf_offset;
+	ptr += buf_offset;
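+	/* ptr now points at the 64-bit placeholder inside the message that
+	 * will be overwritten below with the buffer's dma address
+	 */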
+
+	if (fd < 0) {
+		pr_err("invalid fd [%d]\n", fd);
+		return -ENODEV;
+	}
+
+	if (buf_size < sizeof(uint64_t)) {
+		pr_err("buf size too small [%d]\n", buf_size);
+		return -ENODEV;
+	}
+
+	if (buf_offset % sizeof(uint64_t))
+		pr_debug("offset [%d] is NOT 64-bit aligned\n", buf_offset);
+	else
+		pr_debug("offset [%d] is 64-bit aligned\n", buf_offset);
+
+	if (buf_offset > buf_size - sizeof(uint64_t)) {
+		pr_err("invalid buf_offset [%d]\n", buf_offset);
+		return -ENODEV;
+	}
+
+	dma_buf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(dma_buf)) {
+		pr_err("fail to get dma buf handle\n");
+		return -EINVAL;
+	}
+	pr_debug("dma_buf handle ok\n");
+	attach = dma_buf_attach(dma_buf, &spcom_dev->pdev->dev);
+	if (IS_ERR_OR_NULL(attach)) {
+		ret = PTR_ERR(attach);
+		pr_err("fail to attach dma buf %d\n", ret);
+		dma_buf_put(dma_buf);
+		goto mem_map_table_failed;
+	}
+
+	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(sg)) {
+		ret = PTR_ERR(sg);
+		pr_err("fail to get sg table of dma buf %d\n", ret);
+		goto mem_map_table_failed;
+	}
+	if (sg->sgl) {
+		phy_addr = sg->sgl->dma_address;
+	} else {
+		pr_err("sgl is NULL\n");
+		ret = -ENOMEM;
+		goto mem_map_sg_failed;
+	}
+
+	/* Set the physical address at the buffer offset */
+	pr_debug("ion phys addr = [0x%lx]\n", (long) phy_addr);
+	memcpy(ptr, &phy_addr, sizeof(phy_addr));
+
+mem_map_sg_failed:
+	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+mem_map_table_failed:
+	dma_buf_detach(dma_buf, attach);
+	dma_buf_put(dma_buf);
+
+	return ret;
+}
+
+/**
+ * spcom_handle_send_modified_command() - send a request/response with ION
+ * buffer address. Modify the request/response by replacing the ION buffer
+ * virtual address with the physical address.
+ *
+ * @ch: channel pointer
+ * @cmd_buf: User space command buffer
+ * @size: size of user command buffer
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_send_modified_command(struct spcom_channel *ch,
+					       void *cmd_buf, int size)
+{
+	int ret = 0;
+	struct spcom_user_send_modified_command *cmd = cmd_buf;
+	uint32_t buf_size;
+	void *buf;
+	struct spcom_msg_hdr *hdr;
+	void *tx_buf;
+	int tx_buf_size;
+	struct spcom_ion_info ion_info[SPCOM_MAX_ION_BUF_PER_CMD];
+	int i;
+	uint32_t timeout_msec;
+	int time_msec = 0;
+
+	pr_debug("send req/resp ch [%s] size [%d]\n", ch->name, size);
+
+	/*
+	 * check that cmd buf size is at least struct size,
+	 * to allow access to struct fields.
+	 */
+	if (size < sizeof(*cmd)) {
+		pr_err("ch [%s] invalid cmd buf\n",
+			ch->name);
+		return -EINVAL;
+	}
+
+	/* Check if remote side is connected */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connected\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	/* parse command buffer */
+	buf = &cmd->buf;
+	buf_size = cmd->buf_size;
+	timeout_msec = cmd->timeout_msec;
+	memcpy(ion_info, cmd->ion_info, sizeof(ion_info));
+
+	/* Check param validity */
+	if (buf_size > SPCOM_MAX_RESPONSE_SIZE) {
+		pr_err("ch [%s] invalid buf size [%d]\n",
+			ch->name, buf_size);
+		return -EINVAL;
+	}
+	if (size != sizeof(*cmd) + buf_size) {
+		pr_err("ch [%s] invalid cmd size [%d]\n",
+			ch->name, size);
+		return -EINVAL;
+	}
+
+	/* Allocate Buffers*/
+	tx_buf_size = sizeof(*hdr) + buf_size;
+	tx_buf = kzalloc(tx_buf_size, GFP_KERNEL);
+	if (!tx_buf)
+		return -ENOMEM;
+
+	/* Prepare Tx Buf */
+	hdr = tx_buf;
+
+	mutex_lock(&ch->lock);
+	if (ch->comm_role_undefined) {
+		pr_debug("ch [%s] send first -> it is client\n", ch->name);
+		ch->comm_role_undefined = false;
+		ch->is_server = false;
+	}
+	if (!ch->is_server) {
+		ch->txn_id++;   /* client sets the request txn_id */
+		ch->response_timeout_msec = timeout_msec;
+	}
+	hdr->txn_id = ch->txn_id;
+
+	/* user buf */
+	memcpy(hdr->buf, buf, buf_size);
+
+	for (i = 0 ; i < ARRAY_SIZE(ion_info) ; i++) {
+		if (ion_info[i].fd >= 0) {
+			ret = modify_ion_addr(hdr->buf, buf_size, ion_info[i]);
+			if (ret < 0) {
+				mutex_unlock(&ch->lock);
+				pr_err("modify_ion_addr() error [%d]\n", ret);
+				memset(tx_buf, 0, tx_buf_size);
+				kfree(tx_buf);
+				return -EFAULT;
+			}
+		}
+	}
+
+	time_msec = 0;
+	do {
+		if (ch->rpmsg_abort) {
+			pr_err("ch [%s] aborted\n", ch->name);
+			ret = -ECANCELED;
+			break;
+		}
+		/* may fail when RX intent not queued by SP */
+		ret = rpmsg_trysend(ch->rpdev->ept, tx_buf, tx_buf_size);
+		if (ret == 0)
+			break;
+		time_msec += TX_RETRY_DELAY_MSEC;
+		mutex_unlock(&ch->lock);
+		msleep(TX_RETRY_DELAY_MSEC);
+		mutex_lock(&ch->lock);
+	} while ((ret == -EBUSY || ret == -EAGAIN) && time_msec < timeout_msec);
+	if (ret)
+		pr_err("ch [%s] rpmsg_trysend() error (%d), timeout_msec=%d\n",
+		       ch->name, ret, timeout_msec);
+
+	mutex_unlock(&ch->lock);
+	memset(tx_buf, 0, tx_buf_size);
+	kfree(tx_buf);
+	return ret;
+}
+
+
+/**
+ * spcom_handle_lock_ion_buf_command() - Lock a shared buffer.
+ *
+ * Lock a shared buffer to prevent it from being freed if the userspace app
+ * crashes while the buffer is used by the remote subsystem.
+ */
+static int spcom_handle_lock_ion_buf_command(struct spcom_channel *ch,
+					      void *cmd_buf, int size)
+{
+	struct spcom_user_command *cmd = cmd_buf;
+	int fd;
+	int i;
+	struct dma_buf *dma_buf;
+
+	if (size != sizeof(*cmd)) {
+		pr_err("cmd size [%d] , expected [%d]\n",
+		       (int) size,  (int) sizeof(*cmd));
+		return -EINVAL;
+	}
+
+	if (cmd->arg > (unsigned int)INT_MAX) {
+		pr_err("int overflow [%ld]\n", cmd->arg);
+		return -EINVAL;
+	}
+	fd = cmd->arg;
+
+	dma_buf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(dma_buf)) {
+		pr_err("fail to get dma buf handle\n");
+		return -EINVAL;
+	}
+	pr_debug("dma_buf referenced ok\n");
+
+	/* shared buf lock doesn't involve any rx/tx data to SP. */
+	mutex_lock(&ch->lock);
+
+	/* Check if this shared buffer is already locked */
+	for (i = 0 ; i < ARRAY_SIZE(ch->dmabuf_handle_table) ; i++) {
+		if (ch->dmabuf_handle_table[i] == dma_buf) {
+			pr_debug("fd [%d] shared buf is already locked\n", fd);
+			/* decrement back the ref count */
+			mutex_unlock(&ch->lock);
+			dma_buf_put(dma_buf);
+			return -EINVAL;
+		}
+	}
+
+	/* Store the dma_buf handle */
+	for (i = 0 ; i < ARRAY_SIZE(ch->dmabuf_handle_table) ; i++) {
+		if (ch->dmabuf_handle_table[i] == NULL) {
+			ch->dmabuf_handle_table[i] = dma_buf;
+			ch->dmabuf_fd_table[i] = fd;
+			pr_debug("ch [%s] locked ion buf #%d fd [%d] dma_buf=0x%x\n",
+				ch->name, i,
+				ch->dmabuf_fd_table[i],
+				ch->dmabuf_handle_table[i]);
+			mutex_unlock(&ch->lock);
+			return 0;
+		}
+	}
+
+	mutex_unlock(&ch->lock);
+	/* decrement back the ref count */
+	dma_buf_put(dma_buf);
+	pr_err("no free entry to store ion handle of fd [%d]\n", fd);
+
+	return -EFAULT;
+}
+
+/**
+ * spcom_handle_unlock_ion_buf_command() - Unlock an ION buffer.
+ *
+ * Unlock an ION buffer so it can be freed once it is no longer used by
+ * the remote subsystem.
+ */
+static int spcom_handle_unlock_ion_buf_command(struct spcom_channel *ch,
+					      void *cmd_buf, int size)
+{
+	int i;
+	struct spcom_user_command *cmd = cmd_buf;
+	int fd;
+	bool found = false;
+	struct dma_buf *dma_buf;
+
+	if (size != sizeof(*cmd)) {
+		pr_err("cmd size [%d], expected [%d]\n",
+		       (int)size, (int)sizeof(*cmd));
+		return -EINVAL;
+	}
+	if (cmd->arg > (unsigned int)INT_MAX) {
+		pr_err("int overflow [%ld]\n", cmd->arg);
+		return -EINVAL;
+	}
+	fd = cmd->arg;
+
+	pr_debug("Unlock ion buf ch [%s] fd [%d]\n", ch->name, fd);
+
+	dma_buf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(dma_buf)) {
+		pr_err("fail to get dma buf handle\n");
+		return -EINVAL;
+	}
+	dma_buf_put(dma_buf);
+	pr_debug("dma_buf referenced ok\n");
+
+	/* shared buf unlock doesn't involve any rx/tx data to SP. */
+	mutex_lock(&ch->lock);
+	if (fd == (int) SPCOM_ION_FD_UNLOCK_ALL) {
+		pr_debug("unlocked ALL ion buf ch [%s]\n", ch->name);
+		found = true;
+		/* unlock all buf */
+		for (i = 0; i < ARRAY_SIZE(ch->dmabuf_handle_table); i++) {
+			if (ch->dmabuf_handle_table[i] != NULL) {
+				pr_debug("unlocked ion buf #%d fd [%d]\n",
+					i, ch->dmabuf_fd_table[i]);
+				dma_buf_put(ch->dmabuf_handle_table[i]);
+				ch->dmabuf_handle_table[i] = NULL;
+				ch->dmabuf_fd_table[i] = -1;
+			}
+		}
+	} else {
+		/* unlock specific buf */
+		for (i = 0 ; i < ARRAY_SIZE(ch->dmabuf_handle_table) ; i++) {
+			if (!ch->dmabuf_handle_table[i])
+				continue;
+			if (ch->dmabuf_handle_table[i] == dma_buf) {
+				pr_debug("ch [%s] unlocked ion buf #%d fd [%d] dma_buf=0x%x\n",
+					ch->name, i,
+					ch->dmabuf_fd_table[i],
+					ch->dmabuf_handle_table[i]);
+				dma_buf_put(ch->dmabuf_handle_table[i]);
+				ch->dmabuf_handle_table[i] = NULL;
+				ch->dmabuf_fd_table[i] = -1;
+				found = true;
+				break;
+			}
+		}
+	}
+	mutex_unlock(&ch->lock);
+
+	if (!found) {
+		pr_err("ch [%s] fd [%d] was not found\n", ch->name, fd);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * spcom_handle_write() - Handle user space write commands.
+ *
+ * @buf:	command buffer.
+ * @buf_size:	command buffer size.
+ *
+ * Return: 0 on successful operation, negative value otherwise.
+ */
+static int spcom_handle_write(struct spcom_channel *ch,
+			       void *buf,
+			       int buf_size)
+{
+	int ret = 0;
+	struct spcom_user_command *cmd = NULL;
+	int cmd_id = 0;
+
+	/* Minimal command should have command-id and argument */
+	if (buf_size < sizeof(struct spcom_user_command)) {
+		pr_err("Command buffer size [%d] too small\n", buf_size);
+		return -EINVAL;
+	}
+
+	cmd = (struct spcom_user_command *)buf;
+	cmd_id = (int) cmd->cmd_id;
+
+	pr_debug("cmd_id [0x%x]\n", cmd_id);
+
+	if (!ch && cmd_id != SPCOM_CMD_CREATE_CHANNEL
+			&& cmd_id != SPCOM_CMD_RESTART_SP) {
+		pr_err("channel context is null\n");
+		return -EINVAL;
+	}
+
+	switch (cmd_id) {
+	case SPCOM_CMD_SEND:
+		ret = spcom_handle_send_command(ch, buf, buf_size);
+		break;
+	case SPCOM_CMD_SEND_MODIFIED:
+		ret = spcom_handle_send_modified_command(ch, buf, buf_size);
+		break;
+	case SPCOM_CMD_LOCK_ION_BUF:
+		ret = spcom_handle_lock_ion_buf_command(ch, buf, buf_size);
+		break;
+	case SPCOM_CMD_UNLOCK_ION_BUF:
+		ret = spcom_handle_unlock_ion_buf_command(ch, buf, buf_size);
+		break;
+	case SPCOM_CMD_CREATE_CHANNEL:
+		ret = spcom_handle_create_channel_command(buf, buf_size);
+		break;
+	case SPCOM_CMD_RESTART_SP:
+		ret = spcom_handle_restart_sp_command();
+		break;
+	default:
+		pr_err("Invalid Command Id [0x%x]\n", (int) cmd->cmd_id);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * spcom_handle_get_req_size() - Handle user space get request size command
+ *
+ * @ch:	channel handle
+ * @buf:	command buffer.
+ * @size:	command buffer size.
+ *
+ * Return: size in bytes on success, negative value on failure.
+ */
+static int spcom_handle_get_req_size(struct spcom_channel *ch,
+				      void *buf,
+				      uint32_t size)
+{
+	int ret = -1;
+	uint32_t next_req_size = 0;
+
+	if (size < sizeof(next_req_size)) {
+		pr_err("buf size [%d] too small\n", (int) size);
+		return -EINVAL;
+	}
+
+	ret = spcom_get_next_request_size(ch);
+	if (ret < 0)
+		return ret;
+	next_req_size = (uint32_t) ret;
+
+	memcpy(buf, &next_req_size, sizeof(next_req_size));
+	pr_debug("next_req_size [%d]\n", next_req_size);
+
+	return sizeof(next_req_size); /* can't exceed user buffer size */
+}
+
+/**
+ * spcom_handle_read_req_resp() - Handle user space get request/response command
+ *
+ * @ch:	channel handle
+ * @buf:	command buffer.
+ * @size:	command buffer size.
+ *
+ * Return: size in bytes on success, negative value on failure.
+ */
+static int spcom_handle_read_req_resp(struct spcom_channel *ch,
+				       void *buf,
+				       uint32_t size)
+{
+	int ret;
+	struct spcom_msg_hdr *hdr;
+	void *rx_buf;
+	int rx_buf_size;
+	uint32_t timeout_msec = 0; /* client only */
+
+	/* Check if remote side is connected */
+	if (!spcom_is_channel_connected(ch)) {
+		pr_err("ch [%s] remote side not connected\n", ch->name);
+		return -ENOTCONN;
+	}
+
+	/* Check param validity */
+	if (size > SPCOM_MAX_RESPONSE_SIZE) {
+		pr_err("ch [%s] invalid size [%d]\n",
+			ch->name, size);
+		return -EINVAL;
+	}
+
+	/* Allocate Buffers*/
+	rx_buf_size = sizeof(*hdr) + size;
+	rx_buf = kzalloc(rx_buf_size, GFP_KERNEL);
+	if (!rx_buf)
+		return -ENOMEM;
+
+	/*
+	 * client response timeout depends on the request
+	 * handling time on the remote side.
+	 */
+	if (!ch->is_server) {
+		timeout_msec = ch->response_timeout_msec;
+		pr_debug("response_timeout_msec = %d\n", (int) timeout_msec);
+	}
+
+	ret = spcom_rx(ch, rx_buf, rx_buf_size, timeout_msec);
+	if (ret < 0) {
+		pr_err("rx error %d\n", ret);
+		goto exit_err;
+	} else {
+		size = ret; /* actual_rx_size */
+	}
+
+	hdr = rx_buf;
+
+	if (ch->is_server) {
+		ch->txn_id = hdr->txn_id;
+		pr_debug("request txn_id [0x%x]\n", ch->txn_id);
+	}
+
+	/* copy data to user without the header */
+	if (size > sizeof(*hdr)) {
+		size -= sizeof(*hdr);
+		memcpy(buf, hdr->buf, size);
+	} else {
+		pr_err("rx size [%d] too small\n", size);
+		ret = -EFAULT;
+		goto exit_err;
+	}
+
+	kfree(rx_buf);
+	return size;
+exit_err:
+	kfree(rx_buf);
+	return ret;
+}
+
+/**
+ * spcom_handle_read() - Handle user space read request/response or
+ * request-size command
+ *
+ * @ch:	channel handle
+ * @buf:	command buffer.
+ * @size:	command buffer size.
+ *
+ * A special size, SPCOM_GET_NEXT_REQUEST_SIZE, which is bigger than the max
+ * request/response size, tells the kernel that user space only needs the size.
+ *
+ * Return: size in bytes on success, negative value on failure.
+ */
+static int spcom_handle_read(struct spcom_channel *ch,
+			      void *buf,
+			      uint32_t size)
+{
+	int ret = -1;
+
+	if (size == SPCOM_GET_NEXT_REQUEST_SIZE) {
+		pr_debug("get next request size, ch [%s]\n", ch->name);
+		ch->is_server = true;
+		ret = spcom_handle_get_req_size(ch, buf, size);
+	} else {
+		pr_debug("get request/response, ch [%s]\n", ch->name);
+		ret = spcom_handle_read_req_resp(ch, buf, size);
+	}
+
+	pr_debug("ch [%s] , size = %d\n", ch->name, size);
+
+	return ret;
+}
+
+/*======================================================================*/
+/*		CHAR DEVICE USER SPACE INTERFACE			*/
+/*======================================================================*/
+
+/**
+ * file_to_filename() - get the filename from file pointer.
+ *
+ * @filp: file pointer
+ *
+ * It is used for debug prints.
+ *
+ * Return: filename string or "unknown".
+ */
+static char *file_to_filename(struct file *filp)
+{
+	struct dentry *dentry = NULL;
+	char *filename = NULL;
+
+	if (!filp || !filp->f_path.dentry)
+		return "unknown";
+
+	dentry = filp->f_path.dentry;
+	filename = dentry->d_iname;
+
+	return filename;
+}
+
+/**
+ * spcom_device_open() - handle channel file open() from user space.
+ *
+ * @filp: file pointer
+ *
+ * The file name (without path) is the channel name.
+ * Register an rpmsg driver matching the channel name.
+ * Store the channel context in the file private data pointer for future
+ * read/write/close operations.
+ */
+static int spcom_device_open(struct inode *inode, struct file *filp)
+{
+	struct spcom_channel *ch;
+	int ret;
+	const char *name = file_to_filename(filp);
+	u32 pid = current_pid();
+
+	pr_debug("open file [%s]\n", name);
+
+	if (strcmp(name, "unknown") == 0) {
+		pr_err("name is unknown\n");
+		return -EINVAL;
+	}
+
+	if (strcmp(name, DEVICE_NAME) == 0) {
+		pr_debug("root dir skipped\n");
+		return 0;
+	}
+
+	if (strcmp(name, "sp_ssr") == 0) {
+		pr_debug("sp_ssr dev node skipped\n");
+		return 0;
+	}
+
+	ch = spcom_find_channel_by_name(name);
+	if (!ch) {
+		pr_err("channel %s doesn't exist, load App first\n", name);
+		return -ENODEV;
+	}
+
+	mutex_lock(&ch->lock);
+	if (!spcom_is_channel_open(ch)) {
+		reinit_completion(&ch->connect);
+		/* channel was closed need to register drv again */
+		ret = spcom_register_rpmsg_drv(ch);
+		if (ret < 0) {
+			pr_err("register rpmsg driver failed %d\n", ret);
+			mutex_unlock(&ch->lock);
+			return ret;
+		}
+	}
+	/* only one client/server may use the channel */
+	if (ch->is_busy) {
+		pr_err("channel [%s] is BUSY, already in use by pid [%d]\n",
+			name, ch->pid);
+		mutex_unlock(&ch->lock);
+		return -EBUSY;
+	}
+
+	ch->is_busy = true;
+	ch->pid = pid;
+	mutex_unlock(&ch->lock);
+
+	filp->private_data = ch;
+	return 0;
+}
+
+/**
+ * spcom_device_release() - handle channel file close() from user space.
+ *
+ * @filp: file pointer
+ *
+ * The file name (without path) is the channel name.
+ * Mark the channel as no longer busy and discard any unconsumed
+ * rx data, so the channel can be reused.
+ */
+static int spcom_device_release(struct inode *inode, struct file *filp)
+{
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+	int ret = 0;
+
+	if (strcmp(name, "unknown") == 0) {
+		pr_err("name is unknown\n");
+		return -EINVAL;
+	}
+
+	if (strcmp(name, DEVICE_NAME) == 0) {
+		pr_debug("root dir skipped\n");
+		return 0;
+	}
+
+	if (strcmp(name, "sp_ssr") == 0) {
+		pr_debug("sp_ssr dev node skipped\n");
+		return 0;
+	}
+
+	ch = filp->private_data;
+	if (!ch) {
+		pr_debug("ch is NULL, file name %s\n", file_to_filename(filp));
+		return -ENODEV;
+	}
+
+	mutex_lock(&ch->lock);
+	/* channel might be already closed or disconnected */
+	if (!spcom_is_channel_open(ch)) {
+		pr_debug("ch [%s] already closed\n", name);
+		mutex_unlock(&ch->lock);
+		return 0;
+	}
+
+	ch->is_busy = false;
+	ch->pid = 0;
+	if (ch->rpmsg_rx_buf) {
+		pr_debug("ch [%s] discarting unconsumed rx packet actual_rx_size=%d\n",
+		       name, ch->actual_rx_size);
+		kfree(ch->rpmsg_rx_buf);
+		ch->rpmsg_rx_buf = NULL;
+	}
+	ch->actual_rx_size = 0;
+	mutex_unlock(&ch->lock);
+	filp->private_data = NULL;
+
+	return ret;
+}
+
+/**
+ * spcom_device_write() - handle channel file write() from user space.
+ *
+ * @filp: file pointer
+ *
+ * Return: On Success - same size as number of bytes to write.
+ * On Failure - negative value.
+ */
+static ssize_t spcom_device_write(struct file *filp,
+				   const char __user *user_buff,
+				   size_t size, loff_t *f_pos)
+{
+	int ret;
+	char *buf;
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+	int buf_size = 0;
+
+	if (!user_buff || !f_pos || !filp) {
+		pr_err("invalid null parameters\n");
+		return -EINVAL;
+	}
+
+	if (*f_pos != 0) {
+		pr_err("offset should be zero, no sparse buffer\n");
+		return -EINVAL;
+	}
+
+	if (!name) {
+		pr_err("name is NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("write file [%s] size [%d] pos [%d]\n",
+		 name, (int) size, (int) *f_pos);
+
+	if (strcmp(name, "unknown") == 0) {
+		pr_err("name is unknown\n");
+		return -EINVAL;
+	}
+
+	ch = filp->private_data;
+	if (!ch) {
+		if (strcmp(name, DEVICE_NAME) != 0) {
+			pr_err("invalid ch pointer, command not allowed\n");
+			return -EINVAL;
+		}
+		pr_debug("control device - no channel context\n");
+	} else {
+		/* Check if remote side is connected */
+		if (!spcom_is_channel_connected(ch)) {
+			pr_err("ch [%s] remote side not connected\n", ch->name);
+			return -ENOTCONN;
+		}
+	}
+
+	if (size > SPCOM_MAX_COMMAND_SIZE) {
+		pr_err("size [%d] > max size [%d]\n",
+			   (int) size, (int) SPCOM_MAX_COMMAND_SIZE);
+		return -EINVAL;
+	}
+	buf_size = size; /* explicit casting size_t to int */
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	ret = copy_from_user(buf, user_buff, size);
+	if (ret) {
+		pr_err("Unable to copy from user (err %d)\n", ret);
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	ret = spcom_handle_write(ch, buf, buf_size);
+	if (ret) {
+		pr_err("handle command error [%d]\n", ret);
+		kfree(buf);
+		return ret;
+	}
+
+	kfree(buf);
+
+	return size;
+}
+
+/**
+ * spcom_device_read() - handle channel file read() from user space.
+ *
+ * @filp: file pointer
+ *
+ * Return: number of bytes to read on success, negative value on
+ * failure.
+ */
+static ssize_t spcom_device_read(struct file *filp, char __user *user_buff,
+				 size_t size, loff_t *f_pos)
+{
+	int ret = 0;
+	int actual_size = 0;
+	char *buf;
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+	uint32_t buf_size = 0;
+
+	pr_debug("read file [%s], size = %d bytes\n", name, (int) size);
+
+	if (strcmp(name, "unknown") == 0) {
+		pr_err("name is unknown\n");
+		return -EINVAL;
+	}
+
+	if (!user_buff || !f_pos ||
+	    (size == 0) || (size > SPCOM_MAX_READ_SIZE)) {
+		pr_err("invalid parameters\n");
+		return -EINVAL;
+	}
+	buf_size = size; /* explicit casting size_t to uint32_t */
+
+	ch = filp->private_data;
+
+	if (ch == NULL) {
+		pr_err("invalid ch pointer, file [%s]\n", name);
+		return -EINVAL;
+	}
+
+	if (!spcom_is_channel_open(ch)) {
+		pr_err("ch is not open, file [%s]\n", name);
+		return -EINVAL;
+	}
+
+	buf = kzalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	ret = spcom_handle_read(ch, buf, buf_size);
+	if (ret < 0) {
+		if (ret != -ERESTARTSYS)
+			pr_err("read error [%d]\n", ret);
+		kfree(buf);
+		return ret;
+	}
+	actual_size = ret;
+	if ((actual_size == 0) || (actual_size > size)) {
+		pr_err("invalid actual_size [%d]\n", actual_size);
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	ret = copy_to_user(user_buff, buf, actual_size);
+	if (ret) {
+		pr_err("Unable to copy to user, err = %d\n", ret);
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	kfree(buf);
+	pr_debug("ch [%s] ret [%d]\n", name, (int) actual_size);
+
+	return actual_size;
+}
+
+/**
+ * spcom_device_poll() - handle channel file poll() from user space.
+ *
+ * @filp: file pointer
+ * @poll_table: poll table with the requested event mask
+ *
+ * This allows user space to wait/check for channel connection,
+ * or wait for SSR event.
+ *
+ * Return: event bitmask on success, POLLERR bit set on failure.
+ */
+static unsigned int spcom_device_poll(struct file *filp,
+				       struct poll_table_struct *poll_table)
+{
+	/*
+	 * when the user calls with timeout -1 (blocking mode),
+	 * some bit must be set in the response
+	 */
+	unsigned int ret = SPCOM_POLL_READY_FLAG;
+	unsigned long mask;
+	struct spcom_channel *ch;
+	const char *name = file_to_filename(filp);
+	bool wait = false;
+	bool done = false;
+	/* Event types always implicitly polled for */
+	unsigned long reserved = POLLERR | POLLHUP | POLLNVAL;
+	int ready = 0;
+
+	if (strcmp(name, "unknown") == 0) {
+		pr_err("name is unknown\n");
+		return POLLERR;
+	}
+
+	if (!poll_table) {
+		pr_err("invalid parameters\n");
+		return POLLERR;
+	}
+
+	ch = filp->private_data;
+	mask = poll_requested_events(poll_table);
+
+	pr_debug("== ch [%s] mask [0x%x] ==\n", name, (int) mask);
+
+	/* the user space poll() API uses "short" events, not "long" */
+	mask &= 0x0000FFFF;
+
+	wait = mask & SPCOM_POLL_WAIT_FLAG;
+	if (wait)
+		pr_debug("ch [%s] wait for event flag is ON\n", name);
+
+	/* mask is reused for output, clear the input-only bits */
+	mask &= (unsigned long)~SPCOM_POLL_WAIT_FLAG;
+	mask &= (unsigned long)~SPCOM_POLL_READY_FLAG;
+	mask &= (unsigned long)~reserved;
+
+	switch (mask) {
+	case SPCOM_POLL_LINK_STATE:
+		pr_debug("ch [%s] SPCOM_POLL_LINK_STATE\n", name);
+		if (wait) {
+			reinit_completion(&spcom_dev->rpmsg_state_change);
+			ready = wait_for_completion_interruptible(
+					  &spcom_dev->rpmsg_state_change);
+			pr_debug("ch [%s] poll LINK_STATE signaled\n", name);
+		}
+		done = atomic_read(&spcom_dev->rpmsg_dev_count) > 0;
+		break;
+	case SPCOM_POLL_CH_CONNECT:
+		/*
+		 * ch is not expected to be NULL since user must call open()
+		 * to get FD before it can call poll().
+		 * open() will fail if no ch related to the char-device.
+		 */
+		if (ch == NULL) {
+			pr_err("invalid ch pointer, file [%s]\n", name);
+			return POLLERR;
+		}
+		pr_debug("ch [%s] SPCOM_POLL_CH_CONNECT\n", name);
+		if (wait) {
+			reinit_completion(&ch->connect);
+			ready = wait_for_completion_interruptible(&ch->connect);
+			pr_debug("ch [%s] poll CH_CONNECT signaled\n", name);
+		}
+		mutex_lock(&ch->lock);
+		done = completion_done(&ch->connect);
+		mutex_unlock(&ch->lock);
+		break;
+	default:
+		pr_err("ch [%s] poll, invalid mask [0x%x]\n",
+			 name, (int) mask);
+		ret = POLLERR;
+		break;
+	}
+
+	if (ready < 0) { /* wait was interrupted */
+		pr_debug("ch [%s] poll interrupted, ret [%d]\n", name, ready);
+		ret = POLLERR | SPCOM_POLL_READY_FLAG | mask;
+	}
+	if (done)
+		ret |= mask;
+
+	pr_debug("ch [%s] poll, mask = 0x%x, ret=0x%x\n",
+		 name, (int) mask, ret);
+
+	return ret;
+}
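+
+/*
+ * Illustrative user-space counterpart of the poll() contract above
+ * (a sketch only; ch_fd is a hypothetical open channel fd):
+ *
+ *	struct pollfd pfd = {
+ *		.fd = ch_fd,
+ *		.events = SPCOM_POLL_WAIT_FLAG | SPCOM_POLL_CH_CONNECT,
+ *	};
+ *	poll(&pfd, 1, -1);			// block until signaled
+ *	if (pfd.revents & SPCOM_POLL_CH_CONNECT)
+ *		;				// remote side connected
+ */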
+
+/* file operation supported from user space */
+static const struct file_operations fops = {
+	.read = spcom_device_read,
+	.poll = spcom_device_poll,
+	.write = spcom_device_write,
+	.open = spcom_device_open,
+	.release = spcom_device_release,
+};
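+
+/*
+ * Typical user-space flow over a channel char device (sketch; the channel
+ * name "my_ch" is hypothetical, and requests/responses use the spcom
+ * command framing, details omitted here):
+ *
+ *	int fd = open("/dev/my_ch", O_RDWR);	// spcom_device_open()
+ *	write(fd, req, req_size);		// spcom_device_write() sends a request
+ *	read(fd, resp, sizeof(resp));		// spcom_device_read() blocks for the response
+ *	close(fd);				// spcom_device_release()
+ */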
+
+/**
+ * spcom_create_channel_chardev() - Create a channel char-dev node file
+ * for user space interface
+ */
+static int spcom_create_channel_chardev(const char *name)
+{
+	int ret;
+	struct device *dev;
+	struct spcom_channel *ch;
+	dev_t devt;
+	struct class *cls = spcom_dev->driver_class;
+	struct device *parent = spcom_dev->class_dev;
+	void *priv;
+	struct cdev *cdev;
+
+	pr_debug("Add channel [%s]\n", name);
+
+	ch = spcom_find_channel_by_name(name);
+	if (ch) {
+		pr_err("channel [%s] already exist\n", name);
+		return -EINVAL;
+	}
+
+	ch = spcom_find_channel_by_name(""); /* find reserved channel */
+	if (!ch) {
+		pr_err("no free channel\n");
+		return -ENODEV;
+	}
+
+	ret = spcom_init_channel(ch, name);
+	if (ret < 0) {
+		pr_err("can't init channel %d\n", ret);
+		return ret;
+	}
+
+	ret = spcom_register_rpmsg_drv(ch);
+	if (ret < 0) {
+		pr_err("register rpmsg driver failed %d\n", ret);
+		goto exit_destroy_channel;
+	}
+
+	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+	if (!cdev) {
+		ret = -ENOMEM;
+		goto exit_unregister_drv;
+	}
+
+	devt = spcom_dev->device_no + atomic_read(&spcom_dev->chdev_count);
+	priv = ch;
+	dev = device_create(cls, parent, devt, priv, name);
+	if (IS_ERR(dev)) {
+		ret = PTR_ERR(dev);
+		pr_err("device_create failed %d\n", ret);
+		goto exit_free_cdev;
+	}
+
+	cdev_init(cdev, &fops);
+	cdev->owner = THIS_MODULE;
+
+	ret = cdev_add(cdev, devt, 1);
+	if (ret < 0) {
+		pr_err("cdev_add failed %d\n", ret);
+		goto exit_destroy_device;
+	}
+	atomic_inc(&spcom_dev->chdev_count);
+	mutex_lock(&ch->lock);
+	ch->cdev = cdev;
+	ch->dev = dev;
+	mutex_unlock(&ch->lock);
+
+	return 0;
+
+exit_destroy_device:
+	device_destroy(spcom_dev->driver_class, devt);
+exit_free_cdev:
+	kfree(cdev);
+exit_unregister_drv:
+	ret = spcom_unregister_rpmsg_drv(ch);
+	if (ret != 0)
+		pr_err("can't unregister rpmsg drv %d\n", ret);
+exit_destroy_channel:
+	/* an empty channel name leaves a free slot for next time */
+	mutex_lock(&ch->lock);
+	memset(ch->name, 0, SPCOM_CHANNEL_NAME_SIZE);
+	mutex_unlock(&ch->lock);
+	return -EFAULT;
+}
+
+static int spcom_register_chardev(void)
+{
+	int ret;
+	unsigned int baseminor = 0;
+	unsigned int count = 1;
+	void *priv = spcom_dev;
+
+	ret = alloc_chrdev_region(&spcom_dev->device_no, baseminor, count,
+				 DEVICE_NAME);
+	if (ret < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", ret);
+		return ret;
+	}
+
+	spcom_dev->driver_class = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(spcom_dev->driver_class)) {
+		ret = PTR_ERR(spcom_dev->driver_class);
+		pr_err("class_create failed %d\n", ret);
+		goto exit_unreg_chrdev_region;
+	}
+
+	spcom_dev->class_dev = device_create(spcom_dev->driver_class, NULL,
+				  spcom_dev->device_no, priv,
+				  DEVICE_NAME);
+
+	if (IS_ERR(spcom_dev->class_dev)) {
+		pr_err("class_device_create failed %d\n", ret);
+		ret = -ENOMEM;
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&spcom_dev->cdev, &fops);
+	spcom_dev->cdev.owner = THIS_MODULE;
+
+	ret = cdev_add(&spcom_dev->cdev,
+		       MKDEV(MAJOR(spcom_dev->device_no), 0),
+		       SPCOM_MAX_CHANNELS);
+	if (ret < 0) {
+		pr_err("cdev_add failed %d\n", ret);
+		goto exit_destroy_device;
+	}
+
+	pr_debug("char device created\n");
+
+	return 0;
+
+exit_destroy_device:
+	device_destroy(spcom_dev->driver_class, spcom_dev->device_no);
+exit_destroy_class:
+	class_destroy(spcom_dev->driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(spcom_dev->device_no, 1);
+	return ret;
+}
+
+static void spcom_unregister_chrdev(void)
+{
+	cdev_del(&spcom_dev->cdev);
+	device_destroy(spcom_dev->driver_class, spcom_dev->device_no);
+	class_destroy(spcom_dev->driver_class);
+	unregister_chrdev_region(spcom_dev->device_no,
+				 atomic_read(&spcom_dev->chdev_count));
+}
+
+static int spcom_parse_dt(struct device_node *np)
+{
+	int ret;
+	const char *propname = "qcom,spcom-ch-names";
+	int num_ch;
+	int i;
+	const char *name;
+
+	num_ch = of_property_count_strings(np, propname);
+	if (num_ch < 0) {
+		pr_err("wrong format of predefined channels definition [%d]\n",
+		       num_ch);
+		return num_ch;
+	}
+	if (num_ch > ARRAY_SIZE(spcom_dev->predefined_ch_name)) {
+		pr_err("too many predefined channels [%d]\n", num_ch);
+		return -EINVAL;
+	}
+
+	pr_debug("num of predefined channels [%d]\n", num_ch);
+	for (i = 0; i < num_ch; i++) {
+		ret = of_property_read_string_index(np, propname, i, &name);
+		if (ret) {
+			pr_err("failed to read DT channel [%d] name\n", i);
+			return -EFAULT;
+		}
+		strlcpy(spcom_dev->predefined_ch_name[i],
+			name,
+			sizeof(spcom_dev->predefined_ch_name[i]));
+
+		pr_debug("found ch [%s]\n", name);
+	}
+
+	return num_ch;
+}
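+
+/*
+ * DT fragment consumed by spcom_parse_dt() (sketch; the "sp_ssr" channel
+ * name is illustrative, "sp_kernel" is referenced elsewhere in this driver):
+ *
+ *	spcom {
+ *		compatible = "qcom,spcom";
+ *		qcom,spcom-ch-names = "sp_kernel", "sp_ssr";
+ *	};
+ */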
+
+/*
+ * This function runs in system workqueue context and processes packets
+ * deferred by the rpmsg rx callback: each packet is delivered to its
+ * destination spcom channel ch.
+ */
+static void spcom_signal_rx_done(struct work_struct *ignored)
+{
+	struct spcom_channel *ch;
+	struct rx_buff_list *rx_item;
+	struct spcom_msg_hdr *hdr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&spcom_dev->rx_lock, flags);
+	while (!list_empty(&spcom_dev->rx_list_head)) {
+		/* detach the oldest entry; the rx callback prepends new items */
+		rx_item = list_last_entry(&spcom_dev->rx_list_head,
+					  struct rx_buff_list, list);
+		list_del(&rx_item->list);
+		spin_unlock_irqrestore(&spcom_dev->rx_lock, flags);
+
+		if (!rx_item) {
+			pr_err("empty entry in pending rx list\n");
+			spin_lock_irqsave(&spcom_dev->rx_lock, flags);
+			continue;
+		}
+		ch = rx_item->ch;
+		hdr = (struct spcom_msg_hdr *)rx_item->rpmsg_rx_buf;
+		mutex_lock(&ch->lock);
+
+		if (ch->comm_role_undefined) {
+			ch->comm_role_undefined = false;
+			ch->is_server = true;
+			ch->txn_id = hdr->txn_id;
+			pr_debug("ch [%s] first packet txn_id=%d, it is server\n",
+				 ch->name, ch->txn_id);
+		}
+
+		if (ch->rpmsg_abort) {
+			if (ch->rpmsg_rx_buf) {
+				pr_debug("ch [%s] rx aborted free %d bytes\n",
+					ch->name, ch->actual_rx_size);
+				kfree(ch->rpmsg_rx_buf);
+				ch->actual_rx_size = 0;
+			}
+			goto rx_aborted;
+		}
+		if (ch->rpmsg_rx_buf) {
+			pr_err("ch [%s] previous buffer not consumed %d bytes\n",
+			       ch->name, ch->actual_rx_size);
+			kfree(ch->rpmsg_rx_buf);
+			ch->rpmsg_rx_buf = NULL;
+			ch->actual_rx_size = 0;
+		}
+		if (!ch->is_server && (hdr->txn_id != ch->txn_id)) {
+			pr_err("ch [%s] rx dropped txn_id %d, ch->txn_id %d\n",
+				ch->name, hdr->txn_id, ch->txn_id);
+			goto rx_aborted;
+		}
+		ch->rpmsg_rx_buf = rx_item->rpmsg_rx_buf;
+		ch->actual_rx_size = rx_item->rx_buf_size;
+		complete_all(&ch->rx_done);
+		mutex_unlock(&ch->lock);
+
+		kfree(rx_item);
+
+		/* lock for the next list entry */
+		spin_lock_irqsave(&spcom_dev->rx_lock, flags);
+	}
+	spin_unlock_irqrestore(&spcom_dev->rx_lock, flags);
+	return;
+rx_aborted:
+	mutex_unlock(&ch->lock);
+	kfree(rx_item->rpmsg_rx_buf);
+	kfree(rx_item);
+}
+
+static int spcom_rpdev_cb(struct rpmsg_device *rpdev,
+			  void *data, int len, void *priv, u32 src)
+{
+	struct spcom_channel *ch;
+	static DECLARE_WORK(rpmsg_rx_consumer, spcom_signal_rx_done);
+	struct rx_buff_list *rx_item;
+	unsigned long flags;
+
+	if (!rpdev || !data) {
+		pr_err("rpdev or data is NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("incoming msg from %s\n", rpdev->id.name);
+	ch = dev_get_drvdata(&rpdev->dev);
+	if (!ch) {
+		pr_err("%s: invalid ch\n", __func__);
+		return -EINVAL;
+	}
+	if (len > SPCOM_RX_BUF_SIZE || len <= 0) {
+		pr_err("got msg size %d, max allowed %d\n",
+		       len, SPCOM_RX_BUF_SIZE);
+		return -EINVAL;
+	}
+
+	rx_item = kzalloc(sizeof(*rx_item), GFP_ATOMIC);
+	if (!rx_item)
+		return -ENOMEM;
+
+	rx_item->rpmsg_rx_buf = kmemdup(data, len, GFP_ATOMIC);
+	if (!rx_item->rpmsg_rx_buf) {
+		kfree(rx_item); /* avoid leaking rx_item on OOM */
+		return -ENOMEM;
+	}
+
+	rx_item->rx_buf_size = len;
+	rx_item->ch = ch;
+
+	spin_lock_irqsave(&spcom_dev->rx_lock, flags);
+	list_add(&rx_item->list, &spcom_dev->rx_list_head);
+	spin_unlock_irqrestore(&spcom_dev->rx_lock, flags);
+	pr_debug("signaling rx item for %s, received %d bytes\n",
+	       rpdev->id.name, len);
+
+	schedule_work(&rpmsg_rx_consumer);
+	return 0;
+}
+
+static int spcom_rpdev_probe(struct rpmsg_device *rpdev)
+{
+	const char *name;
+	struct spcom_channel *ch;
+
+	if (!rpdev) {
+		pr_err("rpdev is NULL\n");
+		return -EINVAL;
+	}
+	name = rpdev->id.name;
+	pr_debug("new channel %s rpmsg_device arrived\n", name);
+	ch = spcom_find_channel_by_name(name);
+	if (!ch) {
+		pr_err("channel %s not found\n", name);
+		return -ENODEV;
+	}
+	mutex_lock(&ch->lock);
+	ch->rpdev = rpdev;
+	ch->rpmsg_abort = false;
+	ch->txn_id = INITIAL_TXN_ID;
+	complete_all(&ch->connect);
+	mutex_unlock(&ch->lock);
+
+	dev_set_drvdata(&rpdev->dev, ch);
+
+	/* used to evaluate underlying transport link up/down */
+	atomic_inc(&spcom_dev->rpmsg_dev_count);
+	if (atomic_read(&spcom_dev->rpmsg_dev_count) == 1)
+		complete_all(&spcom_dev->rpmsg_state_change);
+
+	return 0;
+}
+
+static void spcom_rpdev_remove(struct rpmsg_device *rpdev)
+{
+	struct spcom_channel *ch;
+	int i;
+
+	if (!rpdev) {
+		pr_err("rpdev is NULL\n");
+		return;
+	}
+
+	dev_info(&rpdev->dev, "rpmsg device %s removed\n", rpdev->id.name);
+	ch = dev_get_drvdata(&rpdev->dev);
+	if (!ch) {
+		pr_err("channel %s not found\n", rpdev->id.name);
+		return;
+	}
+
+	mutex_lock(&ch->lock);
+	/* unlock all ION buffers of the sp_kernel channel */
+	if (strcmp(ch->name, "sp_kernel") == 0) {
+		for (i = 0; i < ARRAY_SIZE(ch->dmabuf_handle_table); i++) {
+			if (ch->dmabuf_handle_table[i] != NULL) {
+				pr_debug("unlocked ion buf #%d fd [%d]\n",
+					i, ch->dmabuf_fd_table[i]);
+				dma_buf_put(ch->dmabuf_handle_table[i]);
+				ch->dmabuf_handle_table[i] = NULL;
+				ch->dmabuf_fd_table[i] = -1;
+			}
+		}
+	}
+
+	ch->rpdev = NULL;
+	ch->rpmsg_abort = true;
+	ch->txn_id = 0;
+	complete_all(&ch->rx_done);
+	mutex_unlock(&ch->lock);
+
+	/* used to evaluate underlying transport link up/down */
+	if (atomic_dec_and_test(&spcom_dev->rpmsg_dev_count))
+		complete_all(&spcom_dev->rpmsg_state_change);
+}
+
+/* register rpmsg driver to match with channel ch_name */
+static int spcom_register_rpmsg_drv(struct spcom_channel *ch)
+{
+	struct rpmsg_driver *rpdrv;
+	struct rpmsg_device_id *match;
+	char *drv_name;
+	int ret;
+
+	if (ch->rpdrv) {
+		pr_err("ch:%s, rpmsg driver %s already registered\n", ch->name,
+		       ch->rpdrv->id_table->name);
+		return -ENODEV;
+	}
+
+	rpdrv = kzalloc(sizeof(*rpdrv), GFP_KERNEL);
+	if (!rpdrv)
+		return -ENOMEM;
+
+	/* zalloc array of two to NULL terminate the match list */
+	match = kzalloc(2 * sizeof(*match), GFP_KERNEL);
+	if (!match) {
+		kfree(rpdrv);
+		return -ENOMEM;
+	}
+	snprintf(match->name, RPMSG_NAME_SIZE, "%s", ch->name);
+
+	drv_name = kasprintf(GFP_KERNEL, "%s_%s", "spcom_rpmsg_drv", ch->name);
+	if (!drv_name) {
+		pr_err("can't allocate drv_name for %s\n", ch->name);
+		kfree(rpdrv);
+		kfree(match);
+		return -ENOMEM;
+	}
+
+	rpdrv->probe = spcom_rpdev_probe;
+	rpdrv->remove = spcom_rpdev_remove;
+	rpdrv->callback = spcom_rpdev_cb;
+	rpdrv->id_table = match;
+	rpdrv->drv.name = drv_name;
+	ret = register_rpmsg_driver(rpdrv);
+	if (ret) {
+		pr_err("can't register rpmsg_driver for %s\n", ch->name);
+		kfree(rpdrv);
+		kfree(match);
+		kfree(drv_name);
+		return ret;
+	}
+	mutex_lock(&ch->lock);
+	ch->rpdrv = rpdrv;
+	ch->rpmsg_abort = false;
+	mutex_unlock(&ch->lock);
+
+	return 0;
+}
+
+static int spcom_unregister_rpmsg_drv(struct spcom_channel *ch)
+{
+	if (!ch->rpdrv)
+		return -ENODEV;
+	unregister_rpmsg_driver(ch->rpdrv);
+
+	mutex_lock(&ch->lock);
+	kfree(ch->rpdrv->drv.name);
+	kfree((void *)ch->rpdrv->id_table);
+	kfree(ch->rpdrv);
+	ch->rpdrv = NULL;
+	ch->rpmsg_abort = true; /* will unblock spcom_rx() */
+	mutex_unlock(&ch->lock);
+	return 0;
+}
+
+static int spcom_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct spcom_device *dev = NULL;
+	struct device_node *np;
+
+	if (!pdev) {
+		pr_err("invalid pdev\n");
+		return -ENODEV;
+	}
+
+	np = pdev->dev.of_node;
+	if (!np) {
+		pr_err("invalid DT node\n");
+		return -EINVAL;
+	}
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (dev == NULL)
+		return -ENOMEM;
+
+	spcom_dev = dev;
+	spcom_dev->pdev = pdev;
+	/* start counting exposed channel char devices from 1 */
+	atomic_set(&spcom_dev->chdev_count, 1);
+	init_completion(&spcom_dev->rpmsg_state_change);
+	atomic_set(&spcom_dev->rpmsg_dev_count, 0);
+
+	INIT_LIST_HEAD(&spcom_dev->rx_list_head);
+	spin_lock_init(&spcom_dev->rx_lock);
+
+	ret = spcom_register_chardev();
+	if (ret) {
+		pr_err("create character device failed\n");
+		goto fail_while_chardev_reg;
+	}
+
+	ret = spcom_parse_dt(np);
+	if (ret < 0)
+		goto fail_reg_chardev;
+
+	ret = spcom_create_predefined_channels_chardev();
+	if (ret < 0) {
+		pr_err("create character device failed\n");
+		goto fail_reg_chardev;
+	}
+	pr_debug("Driver Initialization ok\n");
+	return 0;
+
+fail_reg_chardev:
+	pr_err("failed to init driver\n");
+	spcom_unregister_chrdev();
+fail_while_chardev_reg:
+	kfree(dev);
+	spcom_dev = NULL;
+
+	return ret;
+}
+
+static const struct of_device_id spcom_match_table[] = {
+	{ .compatible = "qcom,spcom", },
+	{ },
+};
+
+static struct platform_driver spcom_driver = {
+	.probe = spcom_probe,
+	.driver = {
+		.name = DEVICE_NAME,
+		.of_match_table = of_match_ptr(spcom_match_table),
+	},
+};
+
+static int __init spcom_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&spcom_driver);
+	if (ret)
+		pr_err("spcom_driver register failed %d\n", ret);
+
+	return ret;
+}
+module_init(spcom_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Secure Processor Communication");
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 3e312c9..3037b9d 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -26,7 +26,6 @@
 #include <linux/thermal.h>
 #include <linux/cpufreq.h>
 #include <linux/err.h>
-#include <linux/idr.h>
 #include <linux/pm_opp.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
@@ -113,28 +112,9 @@
 	struct cpu_cooling_ops *plat_ops;
 };
 
-static DEFINE_IDA(cpufreq_ida);
 static DEFINE_MUTEX(cooling_list_lock);
 static LIST_HEAD(cpufreq_cdev_list);
 
-static struct cpumask cpus_in_max_cooling_level;
-static BLOCKING_NOTIFIER_HEAD(cpu_max_cooling_level_notifer);
-
-void cpu_cooling_max_level_notifier_register(struct notifier_block *n)
-{
-	blocking_notifier_chain_register(&cpu_max_cooling_level_notifer, n);
-}
-
-void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n)
-{
-	blocking_notifier_chain_unregister(&cpu_max_cooling_level_notifer, n);
-}
-
-const struct cpumask *cpu_cooling_get_max_level_cpumask(void)
-{
-	return &cpus_in_max_cooling_level;
-}
-
 /* Below code defines functions to be used for cpufreq as cooling device */
 
 /**
@@ -760,12 +740,7 @@
 		goto free_idle_time;
 	}
 
-	ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
-	if (ret < 0) {
-		cdev = ERR_PTR(ret);
-		goto free_table;
-	}
-	cpufreq_cdev->id = ret;
+	cpufreq_cdev->id = policy->cpu;
 
 	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
 		 cpufreq_cdev->id);
@@ -786,7 +761,7 @@
 		ret = update_freq_table(cpufreq_cdev, capacitance);
 		if (ret) {
 			cdev = ERR_PTR(ret);
-			goto remove_ida;
+			goto free_table;
 		}
 
 		cooling_ops = &cpufreq_power_cooling_ops;
@@ -799,7 +774,7 @@
 	cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
 						  cooling_ops);
 	if (IS_ERR(cdev))
-		goto remove_ida;
+		goto free_table;
 
 	cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
 	cpufreq_cdev->floor_freq =
@@ -819,8 +794,6 @@
 
 	return cdev;
 
-remove_ida:
-	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
 free_table:
 	kfree(cpufreq_cdev->freq_table);
 free_idle_time:
@@ -969,7 +942,6 @@
 	}
 
 	thermal_cooling_device_unregister(cpufreq_cdev->cdev);
-	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
 	kfree(cpufreq_cdev->idle_time);
 	kfree(cpufreq_cdev->freq_table);
 	kfree(cpufreq_cdev);
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 668b71a..0b0ac7b 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -101,3 +101,14 @@
 	  voltage.
 
 	  If you want this support, you should say Y here.
+
+config QTI_CPU_ISOLATE_COOLING_DEVICE
+	bool "QTI CPU Isolate cooling devices"
+	depends on THERMAL_OF
+	help
+	   This enables the QTI CPU isolation cooling devices. These cooling
+	   devices are used by QTI chipsets to isolate a CPU from being
+	   scheduled, and hence allow the CPU to power collapse. CPU
+	   isolation is used when CPU frequency mitigation alone is not
+	   enough to achieve the necessary cooling.
+
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 1231110..e686496 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -9,3 +9,4 @@
 obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o lmh_dbg.o
 obj-$(CONFIG_QTI_AOP_REG_COOLING_DEVICE) += regulator_aop_cdev.o
 obj-$(CONFIG_REGULATOR_COOLING_DEVICE) += regulator_cdev.o
+obj-$(CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE) += cpu_isolate.o
diff --git a/drivers/thermal/qcom/cpu_isolate.c b/drivers/thermal/qcom/cpu_isolate.c
new file mode 100644
index 0000000..d09feb5
--- /dev/null
+++ b/drivers/thermal/qcom/cpu_isolate.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/thermal.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/of_device.h>
+#include <linux/suspend.h>
+
+#define CPU_ISOLATE_LEVEL 1
+
+struct cpu_isolate_cdev {
+	struct list_head node;
+	int cpu_id;
+	bool cpu_isolate_state;
+	struct thermal_cooling_device *cdev;
+	struct device_node *np;
+	struct work_struct reg_work;
+};
+
+static DEFINE_MUTEX(cpu_isolate_lock);
+static LIST_HEAD(cpu_isolate_cdev_list);
+static atomic_t in_suspend;
+static struct cpumask cpus_pending_online;
+static struct cpumask cpus_isolated_by_thermal;
+
+static struct cpumask cpus_in_max_cooling_level;
+static BLOCKING_NOTIFIER_HEAD(cpu_max_cooling_level_notifier);
+
+void cpu_cooling_max_level_notifier_register(struct notifier_block *n)
+{
+	blocking_notifier_chain_register(&cpu_max_cooling_level_notifier, n);
+}
+
+void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n)
+{
+	blocking_notifier_chain_unregister(&cpu_max_cooling_level_notifier, n);
+}
+
+const struct cpumask *cpu_cooling_get_max_level_cpumask(void)
+{
+	return &cpus_in_max_cooling_level;
+}
+
+static int cpu_isolate_pm_notify(struct notifier_block *nb,
+				unsigned long mode, void *_unused)
+{
+	struct cpu_isolate_cdev *cpu_isolate_cdev;
+	unsigned int cpu;
+
+	switch (mode) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_RESTORE_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		atomic_set(&in_suspend, 1);
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_RESTORE:
+	case PM_POST_SUSPEND:
+		mutex_lock(&cpu_isolate_lock);
+		list_for_each_entry(cpu_isolate_cdev, &cpu_isolate_cdev_list,
+					node) {
+			if (cpu_isolate_cdev->cpu_id == -1)
+				continue;
+			if (cpu_isolate_cdev->cpu_isolate_state) {
+				cpu = cpu_isolate_cdev->cpu_id;
+				if (cpu_online(cpu) &&
+					!cpumask_test_and_set_cpu(cpu,
+					&cpus_isolated_by_thermal)) {
+					if (sched_isolate_cpu(cpu))
+						cpumask_clear_cpu(cpu,
+						&cpus_isolated_by_thermal);
+				}
+				continue;
+			}
+		}
+		mutex_unlock(&cpu_isolate_lock);
+		atomic_set(&in_suspend, 0);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static struct notifier_block cpu_isolate_pm_nb = {
+	.notifier_call = cpu_isolate_pm_notify,
+};
+
+static int cpu_isolate_hp_offline(unsigned int offline_cpu)
+{
+	struct cpu_isolate_cdev *cpu_isolate_cdev;
+
+	mutex_lock(&cpu_isolate_lock);
+	list_for_each_entry(cpu_isolate_cdev, &cpu_isolate_cdev_list, node) {
+		if (offline_cpu != cpu_isolate_cdev->cpu_id)
+			continue;
+
+		if (!cpu_isolate_cdev->cdev)
+			break;
+
+		if ((cpu_isolate_cdev->cpu_isolate_state)
+			&& (cpumask_test_and_clear_cpu(offline_cpu,
+			&cpus_isolated_by_thermal)))
+			sched_unisolate_cpu_unlocked(offline_cpu);
+		break;
+	}
+	mutex_unlock(&cpu_isolate_lock);
+
+	return 0;
+}
+
+static int cpu_isolate_hp_online(unsigned int online_cpu)
+{
+	struct cpu_isolate_cdev *cpu_isolate_cdev;
+	int ret = 0;
+
+	if (atomic_read(&in_suspend))
+		return 0;
+
+	mutex_lock(&cpu_isolate_lock);
+	list_for_each_entry(cpu_isolate_cdev, &cpu_isolate_cdev_list, node) {
+		if (online_cpu != cpu_isolate_cdev->cpu_id)
+			continue;
+
+		if (cpu_isolate_cdev->cdev) {
+			if (cpu_isolate_cdev->cpu_isolate_state) {
+				cpumask_set_cpu(online_cpu,
+						&cpus_pending_online);
+				ret = NOTIFY_BAD;
+			}
+		} else {
+			queue_work(system_highpri_wq,
+					&cpu_isolate_cdev->reg_work);
+		}
+
+		break;
+	}
+	mutex_unlock(&cpu_isolate_lock);
+
+	return ret;
+}
+
+/**
+ * cpu_isolate_set_cur_state - callback function to set the current cooling
+ *				state.
+ * @cdev: thermal cooling device pointer.
+ * @state: the requested cooling state.
+ *
+ * Callback for the thermal cooling device to change the cpu isolation
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpu_isolate_set_cur_state(struct thermal_cooling_device *cdev,
+				 unsigned long state)
+{
+	struct cpu_isolate_cdev *cpu_isolate_cdev = cdev->devdata;
+	struct device *cpu_dev;
+	int ret = 0;
+	int cpu = 0;
+
+	if (cpu_isolate_cdev->cpu_id == -1)
+		return -ENODEV;
+
+	/* Clamp the requested state to the max cooling level */
+	if (state > CPU_ISOLATE_LEVEL)
+		state = CPU_ISOLATE_LEVEL;
+
+	state = !!state;
+	/* Check if the old cooling action is same as new cooling action */
+	if (cpu_isolate_cdev->cpu_isolate_state == state)
+		return 0;
+
+	mutex_lock(&cpu_isolate_lock);
+	cpu = cpu_isolate_cdev->cpu_id;
+	cpu_isolate_cdev->cpu_isolate_state = state;
+	if (state == CPU_ISOLATE_LEVEL) {
+		if (cpu_online(cpu) &&
+			(!cpumask_test_and_set_cpu(cpu,
+			&cpus_isolated_by_thermal))) {
+			if (sched_isolate_cpu(cpu))
+				cpumask_clear_cpu(cpu,
+					&cpus_isolated_by_thermal);
+		}
+		cpumask_set_cpu(cpu, &cpus_in_max_cooling_level);
+		blocking_notifier_call_chain(&cpu_max_cooling_level_notifier,
+						1, (void *)(long)cpu);
+	} else {
+		if (cpumask_test_and_clear_cpu(cpu, &cpus_pending_online)) {
+			cpu_dev = get_cpu_device(cpu);
+			mutex_unlock(&cpu_isolate_lock);
+			ret = device_online(cpu_dev);
+			if (ret)
+				pr_err("CPU:%d online error:%d\n", cpu, ret);
+			return ret;
+		} else if (cpumask_test_and_clear_cpu(cpu,
+			&cpus_isolated_by_thermal)) {
+			sched_unisolate_cpu(cpu);
+		}
+		cpumask_clear_cpu(cpu, &cpus_in_max_cooling_level);
+		blocking_notifier_call_chain(&cpu_max_cooling_level_notifier,
+						0, (void *)(long)cpu);
+	}
+	mutex_unlock(&cpu_isolate_lock);
+
+	return 0;
+}
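+
+/*
+ * The callback above is reached through the thermal core, either from a
+ * governor or from sysfs, e.g. (sketch; X is the cooling device index):
+ *
+ *	echo 1 > /sys/class/thermal/cooling_deviceX/cur_state	# isolate CPU
+ *	echo 0 > /sys/class/thermal/cooling_deviceX/cur_state	# restore CPU
+ */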
+
+/**
+ * cpu_isolate_get_cur_state - callback function to get the current cooling
+ *				state.
+ * @cdev: thermal cooling device pointer.
+ * @state: fill this variable with the current cooling state.
+ *
+ * Callback for the thermal cooling device to return the cpu isolation
+ * current cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpu_isolate_get_cur_state(struct thermal_cooling_device *cdev,
+				 unsigned long *state)
+{
+	struct cpu_isolate_cdev *cpu_isolate_cdev = cdev->devdata;
+
+	*state = (cpu_isolate_cdev->cpu_isolate_state) ?
+			CPU_ISOLATE_LEVEL : 0;
+
+	return 0;
+}
+
+/**
+ * cpu_isolate_get_max_state - callback function to get the max cooling state.
+ * @cdev: thermal cooling device pointer.
+ * @state: fill this variable with the max cooling state.
+ *
+ * Callback for the thermal cooling device to return the cpu
+ * isolation max cooling state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpu_isolate_get_max_state(struct thermal_cooling_device *cdev,
+				 unsigned long *state)
+{
+	*state = CPU_ISOLATE_LEVEL;
+	return 0;
+}
+
+static struct thermal_cooling_device_ops cpu_isolate_cooling_ops = {
+	.get_max_state = cpu_isolate_get_max_state,
+	.get_cur_state = cpu_isolate_get_cur_state,
+	.set_cur_state = cpu_isolate_set_cur_state,
+};
+
+static void cpu_isolate_register_cdev(struct work_struct *work)
+{
+	struct cpu_isolate_cdev *cpu_isolate_cdev =
+			container_of(work, struct cpu_isolate_cdev, reg_work);
+	char cdev_name[THERMAL_NAME_LENGTH] = "";
+	int ret = 0;
+
+	snprintf(cdev_name, THERMAL_NAME_LENGTH, "cpu-isolate%d",
+			cpu_isolate_cdev->cpu_id);
+
+	cpu_isolate_cdev->cdev = thermal_of_cooling_device_register(
+					cpu_isolate_cdev->np,
+					cdev_name,
+					cpu_isolate_cdev,
+					&cpu_isolate_cooling_ops);
+	if (IS_ERR(cpu_isolate_cdev->cdev)) {
+		ret = PTR_ERR(cpu_isolate_cdev->cdev);
+		pr_err("Cooling register failed for %s, ret:%ld\n",
+			cdev_name, ret);
+		cpu_isolate_cdev->cdev = NULL;
+		return;
+	}
+	pr_debug("Cooling device [%s] registered.\n", cdev_name);
+}
+
+static int cpu_isolate_probe(struct platform_device *pdev)
+{
+	int ret = 0, cpu = 0;
+	struct device_node *dev_phandle, *subsys_np;
+	struct device *cpu_dev;
+	struct cpu_isolate_cdev *cpu_isolate_cdev = NULL;
+	struct device_node *np = pdev->dev.of_node;
+
+	INIT_LIST_HEAD(&cpu_isolate_cdev_list);
+	for_each_available_child_of_node(np, subsys_np) {
+		cpu_isolate_cdev = devm_kzalloc(&pdev->dev,
+				sizeof(*cpu_isolate_cdev), GFP_KERNEL);
+		if (!cpu_isolate_cdev)
+			return -ENOMEM;
+		cpu_isolate_cdev->cpu_id = -1;
+		cpu_isolate_cdev->cpu_isolate_state = false;
+		cpu_isolate_cdev->cdev = NULL;
+		cpu_isolate_cdev->np = subsys_np;
+
+		dev_phandle = of_parse_phandle(subsys_np, "qcom,cpu", 0);
+		for_each_possible_cpu(cpu) {
+			cpu_dev = get_cpu_device(cpu);
+			if (cpu_dev && cpu_dev->of_node == dev_phandle) {
+				cpu_isolate_cdev->cpu_id = cpu;
+				break;
+			}
+		}
+		INIT_WORK(&cpu_isolate_cdev->reg_work,
+				cpu_isolate_register_cdev);
+		list_add(&cpu_isolate_cdev->node, &cpu_isolate_cdev_list);
+	}
+
+	atomic_set(&in_suspend, 0);
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpu-isolate/cdev:online",
+				cpu_isolate_hp_online, cpu_isolate_hp_offline);
+	if (ret < 0)
+		return ret;
+	register_pm_notifier(&cpu_isolate_pm_nb);
+	cpumask_clear(&cpus_in_max_cooling_level);
+
+	return 0;
+}
+
+static const struct of_device_id cpu_isolate_match[] = {
+	{ .compatible = "qcom,cpu-isolate", },
+	{},
+};
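+
+/*
+ * Matching DT fragment (sketch; node and phandle names are illustrative):
+ *
+ *	cpu-isolate {
+ *		compatible = "qcom,cpu-isolate";
+ *		cpu0_isolate: cpu0-isolate {
+ *			qcom,cpu = <&CPU0>;
+ *			#cooling-cells = <2>;
+ *		};
+ *	};
+ */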
+
+static struct platform_driver cpu_isolate_driver = {
+	.probe		= cpu_isolate_probe,
+	.driver		= {
+		.name = KBUILD_MODNAME,
+		.of_match_table = cpu_isolate_match,
+	},
+};
+builtin_platform_driver(cpu_isolate_driver);
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index cd42f56..1ff393b 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -160,7 +160,6 @@
 	int ioctl_count;
 	int edge_count;
 	bool manual_flow;
-	bool sampling_rate;
 };
 
 static const struct uart_ops msm_geni_serial_pops;
@@ -1769,8 +1768,6 @@
 		u32 rx_parity_cfg, u32 bits_per_char, u32 stop_bit_len,
 		u32 s_clk_cfg)
 {
-	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
-
 	geni_write_reg_nolog(loopback, uport->membase, SE_UART_LOOPBACK_CFG);
 	geni_write_reg_nolog(tx_trans_cfg, uport->membase,
 							SE_UART_TX_TRANS_CFG);
@@ -1788,8 +1785,8 @@
 						SE_UART_TX_STOP_BIT_LEN);
 	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
 	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
-	if (msm_port->sampling_rate)
-		geni_read_reg_nolog(uport->membase, GENI_SER_M_CLK_CFG);
+
+	geni_read_reg_nolog(uport->membase, GENI_SER_M_CLK_CFG);
 }
 
 static int get_clk_div_rate(unsigned int baud, unsigned long *desired_clk_rate)
@@ -1810,6 +1807,9 @@
 	clk_div = ser_clk / *desired_clk_rate;
 	*desired_clk_rate = ser_clk;
 exit_get_clk_div_rate:
+	if (clk_div)
+		clk_div *= 2;
+
 	return clk_div;
 }
 
@@ -1829,8 +1829,9 @@
 	unsigned long clk_rate;
 	unsigned long flags;
 
-	if (port->sampling_rate)
-		geni_read_reg_nolog(uport->membase, GENI_SER_M_CLK_CFG);
+	geni_write_reg_nolog(0x21, uport->membase, GENI_SER_M_CLK_CFG);
+	geni_write_reg_nolog(0x21, uport->membase, GENI_SER_S_CLK_CFG);
+	geni_read_reg_nolog(uport->membase, GENI_SER_M_CLK_CFG);
 
 	if (!uart_console(uport)) {
 		int ret = msm_geni_serial_power_on(uport);
@@ -1856,8 +1857,6 @@
 	if (clk_div <= 0)
 		goto exit_set_termios;
 
-	if (port->sampling_rate)
-		clk_div = clk_div*2;
 	uport->uartclk = clk_rate;
 	clk_set_rate(port->serial_rsc.se_clk, clk_rate);
 	ser_clk_cfg |= SER_CLK_EN;
@@ -2125,7 +2124,6 @@
 		goto exit_geni_serial_earlyconsetup;
 	}
 
-	clk_div = clk_div*2;
 	s_clk_cfg |= SER_CLK_EN;
 	s_clk_cfg |= (clk_div << CLK_DIV_SHFT);
 
@@ -2135,6 +2133,9 @@
 	 */
 	msm_geni_serial_poll_cancel_tx(uport);
 	msm_geni_serial_abort_rx(uport);
+
+	geni_write_reg_nolog(0x21, uport->membase, GENI_SER_M_CLK_CFG);
+	geni_write_reg_nolog(0x21, uport->membase, GENI_SER_S_CLK_CFG);
 	geni_read_reg_nolog(uport->membase, GENI_SER_M_CLK_CFG);
 
 	se_get_packing_config(8, 1, false, &cfg0, &cfg1);
@@ -2378,10 +2379,6 @@
 
 	uport->dev = &pdev->dev;
 
-	dev_port->sampling_rate =
-		of_property_read_bool(pdev->dev.of_node,
-				"qcom,change-sampling-rate");
-
 	wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
 					"qcom,wrapper-core", 0);
 	if (IS_ERR_OR_NULL(wrapper_ph_node)) {
@@ -2522,8 +2519,9 @@
 		pm_runtime_enable(&pdev->dev);
 	}
 
-	if (dev_port->sampling_rate)
-		geni_read_reg_nolog(uport->membase, GENI_SER_M_CLK_CFG);
+	geni_write_reg_nolog(0x21, uport->membase, GENI_SER_M_CLK_CFG);
+	geni_write_reg_nolog(0x21, uport->membase, GENI_SER_S_CLK_CFG);
+	geni_read_reg_nolog(uport->membase, GENI_SER_M_CLK_CFG);
 
 	dev_info(&pdev->dev, "Serial port%d added.FifoSize %d is_console%d\n",
 				line, uport->fifosize, is_console);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 63e4dcd..1f9f05f 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -3473,8 +3473,8 @@
 
 	memcpy(mdwc->wakeup_irq, usb_irq_info, sizeof(usb_irq_info));
 	for (i = 0; i < USB_MAX_IRQ; i++) {
-		irq_type = IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME |
-						IRQF_ONESHOT;
+		irq_type = IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
+			IRQ_TYPE_LEVEL_HIGH | IRQF_EARLY_RESUME;
 		mdwc->wakeup_irq[i].irq = platform_get_irq_byname(pdev,
 					mdwc->wakeup_irq[i].name);
 		if (mdwc->wakeup_irq[i].irq < 0) {
@@ -3490,11 +3490,6 @@
 		} else {
 			irq_set_status_flags(mdwc->wakeup_irq[i].irq,
 						IRQ_NOAUTOEN);
-			/* ss_phy_irq is level trigger interrupt */
-			if (!strcmp(mdwc->wakeup_irq[i].name, "ss_phy_irq"))
-				irq_type = IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
-					IRQ_TYPE_LEVEL_HIGH | IRQF_EARLY_RESUME;
-
 			ret = devm_request_threaded_irq(&pdev->dev,
 					mdwc->wakeup_irq[i].irq,
 					msm_dwc3_pwr_irq,
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index e9f8661..7775dd7 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -54,9 +54,6 @@
  */
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
 
-extern void cpu_cooling_max_level_notifier_register(struct notifier_block *n);
-extern void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n);
-extern const struct cpumask *cpu_cooling_get_max_level_cpumask(void);
 #else /* !CONFIG_CPU_THERMAL */
 static inline struct thermal_cooling_device *
 cpufreq_cooling_register(struct cpufreq_policy *policy)
@@ -93,4 +90,24 @@
 }
 #endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */
 
+#ifdef CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE
+extern void cpu_cooling_max_level_notifier_register(struct notifier_block *n);
+extern void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n);
+extern const struct cpumask *cpu_cooling_get_max_level_cpumask(void);
+#else
+static inline
+void cpu_cooling_max_level_notifier_register(struct notifier_block *n)
+{
+}
+
+static inline
+void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n)
+{
+}
+
+static inline const struct cpumask *cpu_cooling_get_max_level_cpumask(void)
+{
+	return cpu_none_mask;
+}
+#endif /* CONFIG_QTI_CPU_ISOLATE_COOLING_DEVICE */
 #endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/ipa_wdi3.h b/include/linux/ipa_wdi3.h
index 2844ab6..aca7fba 100644
--- a/include/linux/ipa_wdi3.h
+++ b/include/linux/ipa_wdi3.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018 - 2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _IPA_WDI3_H_
@@ -47,10 +47,12 @@
  * @is_uC_ready: is uC ready. No API should be called until uC
     is ready.
  * @is_smmu_enable: is smmu enabled
+ * @is_over_gsi: is wdi over GSI or uC
  */
 struct ipa_wdi_init_out_params {
 	bool is_uC_ready;
 	bool is_smmu_enabled;
+	bool is_over_gsi;
 };
 
 /**
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index de73778..5957349 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -11,7 +11,10 @@
 #define LINUX_MMC_CARD_H
 
 #include <linux/device.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/mmc.h>
 #include <linux/mod_devicetable.h>
+#include <linux/notifier.h>
 
 struct mmc_cid {
 	unsigned int		manfid;
@@ -60,7 +63,7 @@
 	unsigned int		part_time;		/* Units: ms */
 	unsigned int		sa_timeout;		/* Units: 100ns */
 	unsigned int		generic_cmd6_time;	/* Units: 10ms */
-	unsigned int            power_off_longtime;     /* Units: ms */
+	unsigned int        power_off_longtime;     /* Units: ms */
 	u8			power_off_notification;	/* state */
 	unsigned int		hs_max_dtr;
 	unsigned int		hs200_max_dtr;
@@ -88,6 +91,8 @@
 	unsigned int            data_tag_unit_size;     /* DATA TAG UNIT size */
 	unsigned int		boot_ro_lock;		/* ro lock support */
 	bool			boot_ro_lockable;
+	u8			raw_ext_csd_cmdq;	/* 15 */
+	u8			raw_ext_csd_cache_ctrl;	/* 33 */
 	bool			ffu_capable;	/* Firmware upgrade support */
 	bool			cmdq_en;	/* Command Queue enabled */
 	bool			cmdq_support;	/* Command Queue supported */
@@ -98,7 +103,10 @@
 	u8			raw_partition_support;	/* 160 */
 	u8			raw_rpmb_size_mult;	/* 168 */
 	u8			raw_erased_mem_count;	/* 181 */
+	u8			raw_ext_csd_bus_width;	/* 183 */
 	u8			strobe_support;		/* 184 */
+#define MMC_STROBE_SUPPORT	(1 << 0)
+	u8			raw_ext_csd_hs_timing;	/* 185 */
 	u8			raw_ext_csd_structure;	/* 194 */
 	u8			raw_card_type;		/* 196 */
 	u8			raw_driver_strength;	/* 197 */
@@ -119,13 +127,18 @@
 	u8			raw_pwr_cl_200_360;	/* 237 */
 	u8			raw_pwr_cl_ddr_52_195;	/* 238 */
 	u8			raw_pwr_cl_ddr_52_360;	/* 239 */
+	u8			cache_flush_policy;	/* 240 */
+#define MMC_BKOPS_URGENCY_MASK 0x3
 	u8			raw_pwr_cl_ddr_200_360;	/* 253 */
 	u8			raw_bkops_status;	/* 246 */
 	u8			raw_sectors[4];		/* 212 - 4 bytes */
 	u8			pre_eol_info;		/* 267 */
 	u8			device_life_time_est_typ_a;	/* 268 */
 	u8			device_life_time_est_typ_b;	/* 269 */
+	u8			barrier_support;	/* 486 */
+	u8			barrier_en;
 
+	u8			fw_version;		/* 254 */
 	unsigned int            feature_support;
 #define MMC_DISCARD_FEATURE	BIT(0)                  /* CMD38 feature */
 };
@@ -197,7 +210,8 @@
 				wide_bus:1,
 				high_power:1,
 				high_speed:1,
-				disable_cd:1;
+				disable_cd:1,
+				async_intr_sup:1;
 };
 
 struct sdio_cis {
@@ -208,6 +222,7 @@
 };
 
 struct mmc_host;
+struct mmc_ios;
 struct sdio_func;
 struct sdio_func_tuple;
 struct mmc_queue_req;
@@ -238,6 +253,62 @@
 #define MMC_BLK_DATA_AREA_RPMB	(1<<3)
 };
 
+enum {
+	MMC_BKOPS_NO_OP,
+	MMC_BKOPS_NOT_CRITICAL,
+	MMC_BKOPS_PERF_IMPACT,
+	MMC_BKOPS_CRITICAL,
+	MMC_BKOPS_NUM_SEVERITY_LEVELS,
+};
+
+/**
+ * struct mmc_bkops_stats - BKOPS statistics
+ * @lock: spinlock used for synchronizing the debugfs and the runtime accesses
+ *	to this structure; there is no need to take it with spin_lock_irq()
+ * @manual_start: number of times START_BKOPS was sent to the device
+ * @hpi: number of times HPI was sent to the device
+ * @auto_start: number of times AUTO_EN was set to 1
+ * @auto_stop: number of times AUTO_EN was set to 0
+ * @level: number of times the device reported the need for each level of
+ *	bkops handling
+ * @enabled: control over whether statistics should be gathered
+ *
+ * This structure is used to collect statistics regarding the bkops
+ * configuration and use-patterns. It is collected during runtime and can be
+ * shown to the user via a debugfs entry.
+ */
+struct mmc_bkops_stats {
+	spinlock_t	lock;
+	unsigned int	manual_start;
+	unsigned int	hpi;
+	unsigned int	auto_start;
+	unsigned int	auto_stop;
+	unsigned int	level[MMC_BKOPS_NUM_SEVERITY_LEVELS];
+	bool		enabled;
+};
+
+/**
+ * struct mmc_bkops_info - BKOPS data
+ * @stats: statistic information regarding bkops
+ * @needs_check: indication whether to check with the device
+ *	whether it requires handling of BKOPS (CMD8)
+ * @needs_bkops: indication whether START_BKOPS has to be sent
+ *	to the device
+ * @retry_counter: number of BKOPS retry attempts
+ */
+struct mmc_bkops_info {
+	struct mmc_bkops_stats stats;
+	bool needs_check;
+	bool needs_bkops;
+	u32  retry_counter;
+};
+
+enum mmc_pon_type {
+	MMC_LONG_PON = 1,
+	MMC_SHRT_PON,
+};
+
+#define mmc_card_strobe(c) (((c)->ext_csd).strobe_support & MMC_STROBE_SUPPORT)
+
 /*
  * MMC device
  */
@@ -245,6 +316,10 @@
 	struct mmc_host		*host;		/* the host this device belongs to */
 	struct device		dev;		/* the device */
 	u32			ocr;		/* the current OCR setting */
+	unsigned long		clk_scaling_lowest;	/* lowest scalable */
+							/* frequency */
+	unsigned long		clk_scaling_highest;	/* highest scalable */
+							/* frequency */
 	unsigned int		rca;		/* relative card address of device */
 	unsigned int		type;		/* card type */
 #define MMC_TYPE_MMC		0		/* MMC card */
@@ -259,6 +334,8 @@
 						/* for byte mode */
 #define MMC_QUIRK_NONSTD_SDIO	(1<<2)		/* non-standard SDIO card attached */
 						/* (missing CIA registers) */
+#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3)	/* clock gating the sdio bus */
+						/* will make card fail */
 #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4)		/* SDIO card has nonstd function interfaces */
 #define MMC_QUIRK_DISABLE_CD	(1<<5)		/* disconnect CD/DAT[3] resistor */
 #define MMC_QUIRK_INAND_CMD38	(1<<6)		/* iNAND devices have broken CMD38 */
@@ -270,6 +347,14 @@
 #define MMC_QUIRK_BROKEN_IRQ_POLLING	(1<<11)	/* Polling SDIO_CCCR_INTx could create a fake interrupt */
 #define MMC_QUIRK_TRIM_BROKEN	(1<<12)		/* Skip trim */
 #define MMC_QUIRK_BROKEN_HPI	(1<<13)		/* Disable broken HPI support */
+#define MMC_QUIRK_INAND_DATA_TIMEOUT  (1<<14)   /* For incorrect data timeout */
+#define MMC_QUIRK_CACHE_DISABLE (1 << 15)	/* prevent cache enable */
+#define MMC_QUIRK_QCA6574_SETTINGS (1 << 16)	/* QCA6574 card settings */
+#define MMC_QUIRK_QCA9377_SETTINGS (1 << 17)	/* QCA9377 card settings */
+
+/* Make sure CMDQ is empty before queuing DCMD */
+#define MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD (1 << 18)
 
 	bool			reenable_cmdq;	/* Re-enable Command Queue */
 
@@ -305,9 +390,12 @@
 
 	struct dentry		*debugfs_root;
 	struct mmc_part	part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
-	unsigned int    nr_parts;
+	unsigned int		nr_parts;
+	unsigned int		part_curr;
 
-	unsigned int		bouncesz;	/* Bounce buffer size */
+	struct notifier_block   reboot_notify;
+	enum mmc_pon_type	pon_type;
+	struct mmc_bkops_info bkops;
 };
 
 static inline bool mmc_large_sector(struct mmc_card *card)
@@ -315,10 +403,53 @@
 	return card->ext_csd.data_sector_size == 4096;
 }
 
+/* extended CSD mapping to mmc version */
+enum mmc_version_ext_csd_rev {
+	MMC_V4_0,
+	MMC_V4_1,
+	MMC_V4_2,
+	MMC_V4_41 = 5,
+	MMC_V4_5,
+	MMC_V4_51 = MMC_V4_5,
+	MMC_V5_0,
+	MMC_V5_01 = MMC_V5_0,
+	MMC_V5_1
+};
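+
+/* e.g. an eMMC 5.1 device reports EXT_CSD_REV 8, i.e. ext_csd.rev == MMC_V5_1 */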
+
 bool mmc_card_is_blockaddr(struct mmc_card *card);
 
 #define mmc_card_mmc(c)		((c)->type == MMC_TYPE_MMC)
 #define mmc_card_sd(c)		((c)->type == MMC_TYPE_SD)
 #define mmc_card_sdio(c)	((c)->type == MMC_TYPE_SDIO)
 
+static inline bool mmc_card_support_auto_bkops(const struct mmc_card *c)
+{
+	return c->ext_csd.rev >= MMC_V5_1;
+}
+
+static inline bool mmc_card_configured_manual_bkops(const struct mmc_card *c)
+{
+	return c->ext_csd.man_bkops_en;
+}
+
+static inline bool mmc_card_configured_auto_bkops(const struct mmc_card *c)
+{
+	return c->ext_csd.auto_bkops_en;
+}
+
+static inline bool mmc_enable_qca6574_settings(const struct mmc_card *c)
+{
+	return c->quirks & MMC_QUIRK_QCA6574_SETTINGS;
+}
+
+static inline bool mmc_enable_qca9377_settings(const struct mmc_card *c)
+{
+	return c->quirks & MMC_QUIRK_QCA9377_SETTINGS;
+}
+
+#define mmc_dev_to_card(d)	container_of(d, struct mmc_card, dev)
+#define mmc_get_drvdata(c)	dev_get_drvdata(&(c)->dev)
+#define mmc_set_drvdata(c, d)	dev_set_drvdata(&(c)->dev, d)
+
+extern int mmc_send_pon(struct mmc_card *card);
 #endif /* LINUX_MMC_CARD_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 134a648..cc0faf6 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -8,6 +8,7 @@
 #ifndef LINUX_MMC_CORE_H
 #define LINUX_MMC_CORE_H
 
+#include <uapi/linux/mmc/core.h>
 #include <linux/completion.h>
 #include <linux/types.h>
 
@@ -34,37 +35,6 @@
 #define MMC_CMD23_ARG_TAG_REQ	(1 << 29)
 	u32			resp[4];
 	unsigned int		flags;		/* expected response type */
-#define MMC_RSP_PRESENT	(1 << 0)
-#define MMC_RSP_136	(1 << 1)		/* 136 bit response */
-#define MMC_RSP_CRC	(1 << 2)		/* expect valid crc */
-#define MMC_RSP_BUSY	(1 << 3)		/* card may send busy */
-#define MMC_RSP_OPCODE	(1 << 4)		/* response contains opcode */
-
-#define MMC_CMD_MASK	(3 << 5)		/* non-SPI command type */
-#define MMC_CMD_AC	(0 << 5)
-#define MMC_CMD_ADTC	(1 << 5)
-#define MMC_CMD_BC	(2 << 5)
-#define MMC_CMD_BCR	(3 << 5)
-
-#define MMC_RSP_SPI_S1	(1 << 7)		/* one status byte */
-#define MMC_RSP_SPI_S2	(1 << 8)		/* second byte */
-#define MMC_RSP_SPI_B4	(1 << 9)		/* four data bytes */
-#define MMC_RSP_SPI_BUSY (1 << 10)		/* card may send busy */
-
-/*
- * These are the native response types, and correspond to valid bit
- * patterns of the above flags.  One additional valid pattern
- * is all zeros, which means we don't expect a response.
- */
-#define MMC_RSP_NONE	(0)
-#define MMC_RSP_R1	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
-#define MMC_RSP_R1B	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY)
-#define MMC_RSP_R2	(MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
-#define MMC_RSP_R3	(MMC_RSP_PRESENT)
-#define MMC_RSP_R4	(MMC_RSP_PRESENT)
-#define MMC_RSP_R5	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
-#define MMC_RSP_R6	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
-#define MMC_RSP_R7	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
 
 /* Can be used by core to poll after switch to MMC HS mode */
 #define MMC_RSP_R1_NO_CRC	(MMC_RSP_PRESENT|MMC_RSP_OPCODE)
@@ -112,6 +82,8 @@
 	unsigned int		busy_timeout;	/* busy detect timeout in ms */
 	/* Set this flag only for blocking sanitize request */
 	bool			sanitize_busy;
+	/* Set this flag only for blocking bkops request */
+	bool			bkops_busy;
 
 	struct mmc_data		*data;		/* data segment associated with cmd */
 	struct mmc_request	*mrq;		/* associated request */
@@ -144,6 +116,7 @@
 	int			sg_count;	/* mapped sg entries */
 	struct scatterlist	*sg;		/* I/O scatter list */
 	s32			host_cookie;	/* host private data */
+	bool			fault_injected; /* fault injected */
 };
 
 struct mmc_host;
@@ -172,6 +145,16 @@
 
 struct mmc_card;
 
+extern void mmc_check_bkops(struct mmc_card *card);
+extern void mmc_start_manual_bkops(struct mmc_card *card);
+extern int mmc_set_auto_bkops(struct mmc_card *card, bool enable);
+extern int mmc_suspend_clk_scaling(struct mmc_host *host);
+extern void mmc_flush_detect_work(struct mmc_host *host);
+extern int mmc_try_claim_host(struct mmc_host *host, unsigned int delay);
+extern void __mmc_put_card(struct mmc_card *card);
+extern void mmc_blk_init_bkops_statistics(struct mmc_card *card);
+extern void mmc_deferred_scaling(struct mmc_host *host);
+
 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
 		int retries);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index beed712..561e1b5 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -12,17 +12,25 @@
 
 #include <linux/sched.h>
 #include <linux/device.h>
+#include <linux/devfreq.h>
 #include <linux/fault-inject.h>
+#include <linux/blkdev.h>
+#include <linux/extcon.h>
 
 #include <linux/mmc/core.h>
 #include <linux/mmc/card.h>
 #include <linux/mmc/pm.h>
 #include <linux/dma-direction.h>
+#include <linux/mmc/ring_buffer.h>
+
+#define MMC_AUTOSUSPEND_DELAY_MS	3000
 
 struct mmc_ios {
-	unsigned int	clock;			/* clock rate */
+	unsigned int	clock;		/* clock rate */
+	unsigned int	old_rate;	/* saved clock rate */
+	unsigned long	clk_ts;		/* time stamp of last updated clock */
 	unsigned short	vdd;
-	unsigned int	power_delay_ms;		/* waiting for stable power */
+	unsigned int	power_delay_ms;	/* waiting for stable power */
 
 /* vdd stores the bit number of the selected voltage range from below. */
 
@@ -82,7 +90,37 @@
 
 struct mmc_host;
 
+/* states to represent load on the host */
+enum mmc_load {
+	MMC_LOAD_HIGH,
+	MMC_LOAD_LOW,
+};
+
+enum {
+	MMC_ERR_CMD_TIMEOUT,
+	MMC_ERR_CMD_CRC,
+	MMC_ERR_DAT_TIMEOUT,
+	MMC_ERR_DAT_CRC,
+	MMC_ERR_AUTO_CMD,
+	MMC_ERR_ADMA,
+	MMC_ERR_TUNING,
+	MMC_ERR_CMDQ_RED,
+	MMC_ERR_CMDQ_GCE,
+	MMC_ERR_CMDQ_ICCE,
+	MMC_ERR_REQ_TIMEOUT,
+	MMC_ERR_CMDQ_REQ_TIMEOUT,
+	MMC_ERR_ICE_CFG,
+	MMC_ERR_MAX,
+};
+
 struct mmc_host_ops {
+	int (*init)(struct mmc_host *host);
+	/*
+	 * 'enable' is called when the host is claimed and 'disable' is called
+	 * when the host is released. 'enable' and 'disable' are deprecated.
+	 */
+	int (*enable)(struct mmc_host *host);
+	int (*disable)(struct mmc_host *host);
 	/*
 	 * It is optional for the host to implement pre_req and post_req in
 	 * order to support double buffering of requests (prepare one
@@ -146,6 +184,7 @@
 
 	/* Prepare HS400 target operating frequency depending host driver */
 	int	(*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+	int	(*enhanced_strobe)(struct mmc_host *host);
 
 	/* Prepare for switching from HS400 to HS200 */
 	void	(*hs400_downgrade)(struct mmc_host *host);
@@ -168,6 +207,13 @@
 	 */
 	int	(*multi_io_quirk)(struct mmc_card *card,
 				  unsigned int direction, int blk_size);
+
+	unsigned long (*get_max_frequency)(struct mmc_host *host);
+	unsigned long (*get_min_frequency)(struct mmc_host *host);
+
+	int	(*notify_load)(struct mmc_host *mmc, enum mmc_load);
+	void	(*notify_halt)(struct mmc_host *mmc, bool halt);
+	void	(*force_err_irq)(struct mmc_host *host, u64 errmask);
 };
 
 struct mmc_cqe_ops {
@@ -247,12 +293,14 @@
  * @is_new_req		wake up reason was new request
  * @is_waiting_last_req	mmc context waiting for single running request
  * @wait		wait queue
+ * @lock		lock to protect data fields
  */
 struct mmc_context_info {
 	bool			is_done_rcv;
 	bool			is_new_req;
 	bool			is_waiting_last_req;
 	wait_queue_head_t	wait;
+	spinlock_t		lock;
 };
 
 struct regulator;
@@ -267,9 +315,67 @@
 	struct task_struct *task;
 };
 
+enum dev_state {
+	DEV_SUSPENDING = 1,
+	DEV_SUSPENDED,
+	DEV_RESUMED,
+};
+
+/**
+ * struct mmc_devfeq_clk_scaling - main context for MMC clock scaling logic
+ *
+ * @lock: spinlock to protect statistics
+ * @devfreq: struct that represent mmc-host as a client for devfreq
+ * @devfreq_profile: MMC device profile, mostly polling interval and callbacks
+ * @ondemand_gov_data: struct supplied to the ondemand governor (thresholds)
+ * @state: load state, can be HIGH or LOW, used to notify mmc_host_ops callback
+ * @start_busy: timestamp taken when a data request starts
+ * @measure_interval_start: timestamp taken when a measure interval starts
+ * @devfreq_abort: flag to sync between different contexts relevant to devfreq
+ * @skip_clk_scale_freq_update: flag that enables/disables frequency updates
+ * @freq_table_sz: table size of frequencies supplied to devfreq
+ * @freq_table: frequencies table supplied to devfreq
+ * @curr_freq: current frequency
+ * @polling_delay_ms: polling interval for status collection used by devfreq
+ * @upthreshold: up-threshold supplied to ondemand governor
+ * @downthreshold: down-threshold supplied to ondemand governor
+ * @need_freq_change: flag indicating if a frequency change is required
+ * @is_busy_started: flag indicating if a request is handled by the HW
+ * @enable: flag indicating if the clock scaling logic is enabled for this host
+ * @is_suspended: keep devfreq requests queued while the mmc host is suspended
+ */
+struct mmc_devfeq_clk_scaling {
+	spinlock_t	lock;
+	struct		devfreq *devfreq;
+	struct		devfreq_dev_profile devfreq_profile;
+	struct		devfreq_simple_ondemand_data ondemand_gov_data;
+	enum mmc_load	state;
+	ktime_t		start_busy;
+	ktime_t		measure_interval_start;
+	atomic_t	devfreq_abort;
+	bool		skip_clk_scale_freq_update;
+	int		freq_table_sz;
+	int		pltfm_freq_table_sz;
+	u32		*freq_table;
+	u32		*pltfm_freq_table;
+	unsigned long	total_busy_time_us;
+	unsigned long	target_freq;
+	unsigned long	curr_freq;
+	unsigned long	polling_delay_ms;
+	unsigned int	upthreshold;
+	unsigned int	downthreshold;
+	unsigned int	lower_bus_speed_mode;
+#define MMC_SCALING_LOWER_DDR52_MODE	1
+	bool		need_freq_change;
+	bool		is_busy_started;
+	bool		enable;
+	bool		is_suspended;
+};
+
 struct mmc_host {
 	struct device		*parent;
 	struct device		class_dev;
+	struct mmc_devfeq_clk_scaling	clk_scaling;
 	int			index;
 	const struct mmc_host_ops *ops;
 	struct mmc_pwrseq	*pwrseq;
@@ -347,10 +453,17 @@
 #define MMC_CAP2_FULL_PWR_CYCLE	(1 << 2)	/* Can do full power cycle */
 #define MMC_CAP2_HS200_1_8V_SDR	(1 << 5)        /* can support */
 #define MMC_CAP2_HS200_1_2V_SDR	(1 << 6)        /* can support */
+#define MMC_CAP2_SLEEP_AWAKE    (1 << 7)       /* Use Sleep/Awake (CMD5) */
+/* use max discard ignoring max_busy_timeout parameter */
+#define MMC_CAP2_MAX_DISCARD_SIZE       (1 << 8)
 #define MMC_CAP2_HS200		(MMC_CAP2_HS200_1_8V_SDR | \
 				 MMC_CAP2_HS200_1_2V_SDR)
 #define MMC_CAP2_CD_ACTIVE_HIGH	(1 << 10)	/* Card-detect signal active high */
 #define MMC_CAP2_RO_ACTIVE_HIGH	(1 << 11)	/* Write-protect signal active high */
+#define MMC_CAP2_PACKED_RD      (1 << 12)       /* Allow packed read */
+#define MMC_CAP2_PACKED_WR      (1 << 13)       /* Allow packed write */
+#define MMC_CAP2_PACKED_CMD	(MMC_CAP2_PACKED_RD | \
+				MMC_CAP2_PACKED_WR)
 #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14)	/* Don't power up before scan */
 #define MMC_CAP2_HS400_1_8V	(1 << 15)	/* Can support HS400 1.8V */
 #define MMC_CAP2_HS400_1_2V	(1 << 16)	/* Can support HS400 1.2V */
@@ -367,11 +480,31 @@
 #define MMC_CAP2_CQE		(1 << 23)	/* Has eMMC command queue engine */
 #define MMC_CAP2_CQE_DCMD	(1 << 24)	/* CQE can issue a direct command */
 #define MMC_CAP2_AVOID_3_3V	(1 << 25)	/* Host must negotiate down from 3.3V */
-
+#define MMC_CAP2_PACKED_WR_CONTROL (1 << 26)    /* Allow write packed control */
+#define MMC_CAP2_CLK_SCALE      (1 << 27)       /* Allow dynamic clk scaling */
+#define MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE (1 << 28) /* Allow Async SDIO irq */
+						/* while card in 4-bit mode */
+#define MMC_CAP2_NONHOTPLUG     (1 << 29)       /* Don't support hotplug */
+/* Some hosts need additional tuning */
+#define MMC_CAP2_HS400_POST_TUNING      (1 << 30)
+#define MMC_CAP2_SANITIZE       (1 << 31)               /* Support Sanitize */
 	int			fixed_drv_type;	/* fixed driver type for non-removable media */
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
+#ifdef CONFIG_MMC_CLKGATE
+	int			clk_requests;	/* internal reference counter */
+	unsigned int		clk_delay;	/* number MCI clk hold cycles */
+	bool			clk_gated;	/* clock gated */
+	struct workqueue_struct *clk_gate_wq;	/* clock gate work queue */
+	struct delayed_work	clk_gate_work; /* delayed clock gate */
+	unsigned int		clk_old;	/* old clock value cache */
+	spinlock_t		clk_lock;	/* lock for clk fields */
+	struct mutex		clk_gate_mutex;	/* mutex for clock gating */
+	struct device_attribute clkgate_delay_attr;
+	unsigned long           clkgate_delay;
+#endif
+
 	/* host specific block data */
 	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
 	unsigned short		max_segs;	/* see blk_queue_max_segments */
@@ -385,6 +518,7 @@
 	spinlock_t		lock;		/* lock for claim and bus ops */
 
 	struct mmc_ios		ios;		/* current io bus settings */
+	struct mmc_ios		cached_ios;
 
 	/* group bitfields together to minimize padding */
 	unsigned int		use_spi_crc:1;
@@ -420,6 +554,11 @@
 	const struct mmc_bus_ops *bus_ops;	/* current bus driver */
 	unsigned int		bus_refs;	/* reference counter */
 
+	unsigned int		bus_resume_flags;
+#define MMC_BUSRESUME_MANUAL_RESUME	(1 << 0)
+#define MMC_BUSRESUME_NEEDS_RESUME	(1 << 1)
+	bool ignore_bus_resume_flags;
+
 	unsigned int		sdio_irqs;
 	struct task_struct	*sdio_irq_thread;
 	struct delayed_work	sdio_irq_work;
@@ -437,6 +576,9 @@
 
 	struct dentry		*debugfs_root;
 
+	bool			err_occurred;
+	u32			err_stats[MMC_ERR_MAX];
+
 	/* Ongoing data transfer that allows commands during transfer */
 	struct mmc_request	*ongoing_mrq;
 
@@ -458,12 +600,36 @@
 	bool			cqe_enabled;
 	bool			cqe_on;
 
+	/*
+	 * Set to true to just stop the SDCLK to the card without
+	 * actually disabling the clock from its source.
+	 */
+	bool			card_clock_off;
+	struct extcon_dev	*extcon;
+	struct notifier_block card_detect_nb;
+
+#ifdef CONFIG_MMC_PERF_PROFILING
+	struct {
+		unsigned long rbytes_drv;  /* Rd bytes MMC Host  */
+		unsigned long wbytes_drv;  /* Wr bytes MMC Host  */
+		ktime_t rtime_drv;	   /* Rd time  MMC Host  */
+		ktime_t wtime_drv;	   /* Wr time  MMC Host  */
+		ktime_t start;
+	} perf;
+	bool perf_enable;
+#endif
+	struct mmc_trace_buffer trace_buf;
+	enum dev_state dev_status;
+	bool inlinecrypt_support;  /* Inline encryption support */
+	bool crash_on_err;	/* crash the system on error */
 	unsigned long		private[0] ____cacheline_aligned;
 };
 
 struct device_node;
 
 struct mmc_host *mmc_alloc_host(int extra, struct device *);
+extern bool mmc_host_may_gate_card(struct mmc_card *card);
 int mmc_add_host(struct mmc_host *);
 void mmc_remove_host(struct mmc_host *);
 void mmc_free_host(struct mmc_host *);
@@ -480,6 +646,22 @@
 #define mmc_dev(x)	((x)->parent)
 #define mmc_classdev(x)	(&(x)->class_dev)
 #define mmc_hostname(x)	(dev_name(&(x)->class_dev))
+#define mmc_bus_needs_resume(host) ((host)->bus_resume_flags & \
+				    MMC_BUSRESUME_NEEDS_RESUME)
+#define mmc_bus_manual_resume(host) ((host)->bus_resume_flags & \
+				MMC_BUSRESUME_MANUAL_RESUME)
+
+static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
+{
+	if (manual)
+		host->bus_resume_flags |= MMC_BUSRESUME_MANUAL_RESUME;
+	else
+		host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME;
+}
+
+extern int mmc_resume_bus(struct mmc_host *host);
 
 void mmc_detect_change(struct mmc_host *, unsigned long delay);
 void mmc_request_done(struct mmc_host *, struct mmc_request *);
@@ -542,7 +724,42 @@
 	return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ;
 }
 
-/* TODO: Move to private header */
+static inline bool mmc_card_and_host_support_async_int(struct mmc_host *host)
+{
+	return ((host->caps2 & MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE) &&
+			(host->card->cccr.async_intr_sup));
+}
+
+static inline void mmc_host_clear_sdr104(struct mmc_host *host)
+{
+	host->caps &= ~MMC_CAP_UHS_SDR104;
+}
+
+static inline void mmc_host_set_sdr104(struct mmc_host *host)
+{
+	host->caps |= MMC_CAP_UHS_SDR104;
+}
+
+#ifdef CONFIG_MMC_CLKGATE
+void mmc_host_clk_hold(struct mmc_host *host);
+void mmc_host_clk_release(struct mmc_host *host);
+unsigned int mmc_host_clk_rate(struct mmc_host *host);
+
+#else
+static inline void mmc_host_clk_hold(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_release(struct mmc_host *host)
+{
+}
+
+static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	return host->ios.clock;
+}
+#endif
+
 static inline int mmc_card_hs(struct mmc_card *card)
 {
 	return card->host->ios.timing == MMC_TIMING_SD_HS ||
@@ -556,6 +773,8 @@
 		card->host->ios.timing <= MMC_TIMING_UHS_DDR50;
 }
 
+void mmc_retune_enable(struct mmc_host *host);
+void mmc_retune_disable(struct mmc_host *host);
 void mmc_retune_timer_stop(struct mmc_host *host);
 
 static inline void mmc_retune_needed(struct mmc_host *host)
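
For illustration, a minimal sketch (assuming CONFIG_MMC_CLKGATE is enabled) of how the clock-gating helpers declared above are expected to bracket controller register access; my_host_readl() and MY_HOST_STATUS_REG are hypothetical placeholders, not part of this patch:

/*
 * Illustrative only: pin the card clock across a register access
 * window, then allow the delayed-work gating to resume.
 */
static u32 my_host_read_status(struct mmc_host *host)
{
	u32 status;

	mmc_host_clk_hold(host);	/* keep SDCLK ungated */
	status = my_host_readl(host, MY_HOST_STATUS_REG);
	mmc_host_clk_release(host);	/* allow gating again */

	return status;
}
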
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 897a87c..5711039 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -25,66 +25,7 @@
 #define LINUX_MMC_MMC_H
 
 #include <linux/types.h>
-
-/* Standard MMC commands (4.1)           type  argument     response */
-   /* class 1 */
-#define MMC_GO_IDLE_STATE         0   /* bc                          */
-#define MMC_SEND_OP_COND          1   /* bcr  [31:0] OCR         R3  */
-#define MMC_ALL_SEND_CID          2   /* bcr                     R2  */
-#define MMC_SET_RELATIVE_ADDR     3   /* ac   [31:16] RCA        R1  */
-#define MMC_SET_DSR               4   /* bc   [31:16] RCA            */
-#define MMC_SLEEP_AWAKE		  5   /* ac   [31:16] RCA 15:flg R1b */
-#define MMC_SWITCH                6   /* ac   [31:0] See below   R1b */
-#define MMC_SELECT_CARD           7   /* ac   [31:16] RCA        R1  */
-#define MMC_SEND_EXT_CSD          8   /* adtc                    R1  */
-#define MMC_SEND_CSD              9   /* ac   [31:16] RCA        R2  */
-#define MMC_SEND_CID             10   /* ac   [31:16] RCA        R2  */
-#define MMC_READ_DAT_UNTIL_STOP  11   /* adtc [31:0] dadr        R1  */
-#define MMC_STOP_TRANSMISSION    12   /* ac                      R1b */
-#define MMC_SEND_STATUS          13   /* ac   [31:16] RCA        R1  */
-#define MMC_BUS_TEST_R           14   /* adtc                    R1  */
-#define MMC_GO_INACTIVE_STATE    15   /* ac   [31:16] RCA            */
-#define MMC_BUS_TEST_W           19   /* adtc                    R1  */
-#define MMC_SPI_READ_OCR         58   /* spi                  spi_R3 */
-#define MMC_SPI_CRC_ON_OFF       59   /* spi  [0:0] flag      spi_R1 */
-
-  /* class 2 */
-#define MMC_SET_BLOCKLEN         16   /* ac   [31:0] block len   R1  */
-#define MMC_READ_SINGLE_BLOCK    17   /* adtc [31:0] data addr   R1  */
-#define MMC_READ_MULTIPLE_BLOCK  18   /* adtc [31:0] data addr   R1  */
-#define MMC_SEND_TUNING_BLOCK    19   /* adtc                    R1  */
-#define MMC_SEND_TUNING_BLOCK_HS200	21	/* adtc R1  */
-
-  /* class 3 */
-#define MMC_WRITE_DAT_UNTIL_STOP 20   /* adtc [31:0] data addr   R1  */
-
-  /* class 4 */
-#define MMC_SET_BLOCK_COUNT      23   /* adtc [31:0] data addr   R1  */
-#define MMC_WRITE_BLOCK          24   /* adtc [31:0] data addr   R1  */
-#define MMC_WRITE_MULTIPLE_BLOCK 25   /* adtc                    R1  */
-#define MMC_PROGRAM_CID          26   /* adtc                    R1  */
-#define MMC_PROGRAM_CSD          27   /* adtc                    R1  */
-
-  /* class 6 */
-#define MMC_SET_WRITE_PROT       28   /* ac   [31:0] data addr   R1b */
-#define MMC_CLR_WRITE_PROT       29   /* ac   [31:0] data addr   R1b */
-#define MMC_SEND_WRITE_PROT      30   /* adtc [31:0] wpdata addr R1  */
-
-  /* class 5 */
-#define MMC_ERASE_GROUP_START    35   /* ac   [31:0] data addr   R1  */
-#define MMC_ERASE_GROUP_END      36   /* ac   [31:0] data addr   R1  */
-#define MMC_ERASE                38   /* ac                      R1b */
-
-  /* class 9 */
-#define MMC_FAST_IO              39   /* ac   <Complex>          R4  */
-#define MMC_GO_IRQ_STATE         40   /* bcr                     R5  */
-
-  /* class 7 */
-#define MMC_LOCK_UNLOCK          42   /* adtc                    R1b */
-
-  /* class 8 */
-#define MMC_APP_CMD              55   /* ac   [31:16] RCA        R1  */
-#define MMC_GEN_CMD              56   /* adtc [0] RD/WR          R1  */
+#include <uapi/linux/mmc/mmc.h>
 
   /* class 11 */
 #define MMC_QUE_TASK_PARAMS      44   /* ac   [20:16] task id    R1  */
@@ -188,6 +129,7 @@
  * OCR bits are mostly in host.h
  */
 #define MMC_CARD_BUSY	0x80000000	/* Card Power up status bit */
+#define MMC_CARD_SECTOR_ADDR 0x40000000 /* Card supports sectors */
 
 /*
  * Card Command Classes (CCC)
@@ -291,6 +233,7 @@
 #define EXT_CSD_PWR_CL_200_360		237	/* RO */
 #define EXT_CSD_PWR_CL_DDR_52_195	238	/* RO */
 #define EXT_CSD_PWR_CL_DDR_52_360	239	/* RO */
+#define EXT_CSD_CACHE_FLUSH_POLICY	240	/* RO */
 #define EXT_CSD_BKOPS_STATUS		246	/* RO */
 #define EXT_CSD_POWER_OFF_LONG_TIME	247	/* RO */
 #define EXT_CSD_GENERIC_CMD6_TIME	248	/* RO */
@@ -314,7 +257,8 @@
  * EXT_CSD field definitions
  */
 
-#define EXT_CSD_WR_REL_PARAM_EN		(1<<2)
+#define EXT_CSD_WR_REL_PARAM_EN			(1<<2)
+#define EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR	(1<<4)
 
 #define EXT_CSD_BOOT_WP_B_PWR_WP_DIS	(0x40)
 #define EXT_CSD_BOOT_WP_B_PERM_WP_DIS	(0x10)
@@ -387,6 +331,9 @@
 
 #define EXT_CSD_PACKED_EVENT_EN	BIT(3)
 
+#define EXT_CSD_BKOPS_MANUAL_EN		BIT(0)
+#define EXT_CSD_BKOPS_AUTO_EN		BIT(1)
+
 /*
  * EXCEPTION_EVENT_STATUS field
  */
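
Per its comment above, the new MMC_CARD_SECTOR_ADDR define names OCR bit 30, which high-capacity cards set to indicate sector (rather than byte) data addressing. A hedged sketch of the kind of check a caller might perform; the helper name is hypothetical:

/* Sketch: decide the data-address unit from the OCR the card returned. */
static inline bool card_uses_sector_addressing(u32 ocr)
{
	return !!(ocr & MMC_CARD_SECTOR_ADDR);
}
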
diff --git a/include/linux/mmc/ring_buffer.h b/include/linux/mmc/ring_buffer.h
new file mode 100644
index 0000000..b08622ac
--- /dev/null
+++ b/include/linux/mmc/ring_buffer.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MMC_RING_BUFFER__
+#define __MMC_RING_BUFFER__
+
+#include <linux/mmc/card.h>
+#include <linux/smp.h>
+
+#include "core.h"
+
+#define MMC_TRACE_RBUF_SZ_ORDER	2	/* 2^2 pages */
+#define MMC_TRACE_RBUF_SZ	(PAGE_SIZE * (1 << MMC_TRACE_RBUF_SZ_ORDER))
+#define MMC_TRACE_EVENT_SZ	256
+#define MMC_TRACE_RBUF_NUM_EVENTS	(MMC_TRACE_RBUF_SZ / MMC_TRACE_EVENT_SZ)
+
+struct mmc_host;
+struct mmc_trace_buffer {
+	int	wr_idx;
+	bool stop_tracing;
+	spinlock_t trace_lock;
+	char *data;
+};
+
+#ifdef CONFIG_MMC_RING_BUFFER
+void mmc_stop_tracing(struct mmc_host *mmc);
+void mmc_trace_write(struct mmc_host *mmc, const char *fmt, ...);
+void mmc_trace_init(struct mmc_host *mmc);
+void mmc_trace_free(struct mmc_host *mmc);
+void mmc_dump_trace_buffer(struct mmc_host *mmc, struct seq_file *s);
+#else
+static inline void mmc_stop_tracing(struct mmc_host *mmc) {}
+static inline void mmc_trace_write(struct mmc_host *mmc,
+		const char *fmt, ...) {}
+static inline void mmc_trace_init(struct mmc_host *mmc) {}
+static inline void mmc_trace_free(struct mmc_host *mmc) {}
+static inline void mmc_dump_trace_buffer(struct mmc_host *mmc,
+		struct seq_file *s) {}
+#endif
+
+#define MMC_TRACE(mmc, fmt, ...) \
+		mmc_trace_write(mmc, fmt, ##__VA_ARGS__)
+
+#endif /* __MMC_RING_BUFFER__ */
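
A hedged sketch of a writer consistent with the layout declared above: fixed 256-byte event slots with the write index wrapping over the 4-page buffer. This is an assumption about the (unshown) ring_buffer.c implementation, not a quote of it:

/* Illustrative writer under the stated layout assumptions. */
static void example_trace_write(struct mmc_trace_buffer *buf,
				const char *fmt, ...)
{
	va_list args;
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&buf->trace_lock, flags);
	if (!buf->stop_tracing && buf->data) {
		idx = buf->wr_idx++ % MMC_TRACE_RBUF_NUM_EVENTS;
		va_start(args, fmt);
		vscnprintf(buf->data + idx * MMC_TRACE_EVENT_SZ,
			   MMC_TRACE_EVENT_SZ, fmt, args);
		va_end(args);
	}
	spin_unlock_irqrestore(&buf->trace_lock, flags);
}

Callers would then log free-form events via MMC_TRACE(host, "CMD%d arg=0x%08x\n", opcode, arg).
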
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 17446d3..8d7e5cf 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -102,6 +102,7 @@
 #define  SDIO_BUS_WIDTH_1BIT	0x00
 #define  SDIO_BUS_WIDTH_RESERVED 0x01
 #define  SDIO_BUS_WIDTH_4BIT	0x02
+#define  SDIO_BUS_WIDTH_8BIT	0x03
 #define  SDIO_BUS_ECSI		0x20	/* Enable continuous SPI interrupt */
 #define  SDIO_BUS_SCSI		0x40	/* Support continuous SPI interrupt */
 
@@ -163,6 +164,10 @@
 #define  SDIO_DTSx_SET_TYPE_A	(1 << SDIO_DRIVE_DTSx_SHIFT)
 #define  SDIO_DTSx_SET_TYPE_C	(2 << SDIO_DRIVE_DTSx_SHIFT)
 #define  SDIO_DTSx_SET_TYPE_D	(3 << SDIO_DRIVE_DTSx_SHIFT)
+
+#define SDIO_CCCR_INTERRUPT_EXTENSION	0x16
+#define	SDIO_SUPPORT_ASYNC_INTR		(1<<0)
+#define	SDIO_ENABLE_ASYNC_INTR		(1<<1)
 /*
  * Function Basic Registers (FBR)
  */
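
A hedged sketch of how a function driver might probe and enable the new asynchronous-interrupt CCCR bits. Note that sdio_f0_writeb() refuses non-vendor FN0 addresses unless the card carries MMC_QUIRK_LENIENT_FN0; the core itself would normally use its internal register helpers instead:

/* Illustrative only: enable async SDIO interrupts if the card supports them. */
static int example_enable_async_irq(struct sdio_func *func)
{
	int ret;
	u8 reg;

	reg = sdio_f0_readb(func, SDIO_CCCR_INTERRUPT_EXTENSION, &ret);
	if (ret)
		return ret;
	if (!(reg & SDIO_SUPPORT_ASYNC_INTR))
		return -EOPNOTSUPP;

	reg |= SDIO_ENABLE_ASYNC_INTR;
	sdio_f0_writeb(func, reg, SDIO_CCCR_INTERRUPT_EXTENSION, &ret);
	return ret;
}
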
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 06607c5..34c48e1 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -35,5 +35,7 @@
 void mmc_gpiod_request_cd_irq(struct mmc_host *host);
 bool mmc_can_gpio_cd(struct mmc_host *host);
 bool mmc_can_gpio_ro(struct mmc_host *host);
+void mmc_register_extcon(struct mmc_host *host);
+void mmc_unregister_extcon(struct mmc_host *host);
 
 #endif
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 1fdd2e5..fd39487 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -748,6 +748,36 @@
 };
 
 /**
+ * gsi_wdi3_channel_scratch - WDI protocol 3 SW config area of
+ * channel scratch
+ *
+ * @wifi_rp_address_low: Low 32 bits of the transfer ring Read Index address.
+ * @wifi_rp_address_high: High 32 bits of the transfer ring Read Index address.
+ * @update_rp_moderation_threshold: Threshold N for the transfer ring Read
+ *                                  Index: the number of packets that IPA will
+ *                                  process before the WiFi transfer ring RI
+ *                                  is updated.
+ * @reserved: reserved bits.
+ * @rx_pkt_offset: Rx only; since the Rx header length is not fixed,
+ *                 the WLAN host passes this offset to IPA.
+ * @endp_metadata_reg_offset: Rx only, the offset of
+ *                 IPA_ENDP_INIT_HDR_METADATA_n of the
+ *                 corresponding endpoint in 4B words from IPA
+ *                 base address.
+ * @qmap_id: Rx only, used for setting the metadata register in IPA.
+ *           Read-only for MCS; written by SW.
+ */
+struct __packed gsi_wdi3_channel_scratch {
+	uint32_t wifi_rp_address_low;
+	uint32_t wifi_rp_address_high;
+	uint32_t update_rp_moderation_threshold : 5;
+	uint32_t reserved : 11;
+	uint32_t rx_pkt_offset : 16;
+	uint32_t endp_metadata_reg_offset : 16;
+	uint32_t qmap_id : 16;
+};
+
+/**
  * gsi_channel_scratch - channel scratch SW config area
  *
  */
@@ -758,6 +788,7 @@
 	struct __packed gsi_wdi_channel_scratch wdi;
 	struct __packed gsi_11ad_rx_channel_scratch rx_11ad;
 	struct __packed gsi_11ad_tx_channel_scratch tx_11ad;
+	struct __packed gsi_wdi3_channel_scratch wdi3;
 	struct __packed {
 		uint32_t word1;
 		uint32_t word2;
@@ -837,6 +868,22 @@
 };
 
 /**
+ * gsi_wdi3_evt_scratch - wdi3 protocol SW config area of
+ * event scratch
+ * @update_rp_moderation_config: Threshold N for the transfer ring Read
+ *                               Index: the number of packets that IPA will
+ *                               process before the WiFi transfer ring RI
+ *                               is updated.
+ * @reserved1: reserved bits.
+ * @reserved2: reserved bits.
+ */
+struct __packed gsi_wdi3_evt_scratch {
+	uint32_t update_rp_moderation_config : 8;
+	uint32_t reserved1 : 24;
+	uint32_t reserved2;
+};
+
+/**
  * gsi_evt_scratch - event scratch SW config area
  *
  */
@@ -845,6 +892,7 @@
 	struct __packed gsi_xdci_evt_scratch xdci;
 	struct __packed gsi_wdi_evt_scratch wdi;
 	struct __packed gsi_11ad_evt_scratch w11ad;
+	struct __packed gsi_wdi3_evt_scratch wdi3;
 	struct __packed {
 		uint32_t word1;
 		uint32_t word2;
@@ -1403,6 +1451,24 @@
 int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
 
 /**
+ * gsi_wdi3_write_evt_ring_db - write event ring doorbell address
+ *
+ * @chan_hdl: gsi channel handle
+ * @db_addr_low: low 32 bits of the doorbell address
+ * @db_addr_high: high 32 bits of the doorbell address
+ */
+void gsi_wdi3_write_evt_ring_db(unsigned long chan_hdl, uint32_t db_addr_low,
+	uint32_t db_addr_high);
+
+
+/**
+ * gsi_wdi3_dump_register - dump wdi3 related gsi registers
+ *
+ * @chan_hdl: gsi channel handle
+ */
+void gsi_wdi3_dump_register(unsigned long chan_hdl);
+
+
+/**
  * gsi_map_base - Peripheral should call this function to configure
  * access to the GSI registers.
 
@@ -1704,10 +1770,20 @@
 }
 
 static inline int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee,
-	 int *code)
+	int *code)
 {
 	return -GSI_STATUS_UNSUPPORTED_OP;
 }
 
+static inline void gsi_wdi3_write_evt_ring_db(
+	unsigned long chan_hdl, uint32_t db_addr_low,
+	uint32_t db_addr_high)
+{
+}
+
+static inline void gsi_wdi3_dump_register(unsigned long chan_hdl)
+{
+}
+
 #endif
 #endif
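
For illustration, a minimal sketch of populating the WDI3 channel scratch defined above before handing it to GSI; the threshold value is a placeholder, not a recommended setting:

/* Illustrative only: fill the WDI3 SW scratch for a channel. */
static void example_fill_wdi3_scratch(struct gsi_wdi3_channel_scratch *scr,
				      u64 rp_addr)
{
	memset(scr, 0, sizeof(*scr));
	scr->wifi_rp_address_low = (u32)rp_addr;
	scr->wifi_rp_address_high = (u32)(rp_addr >> 32);
	scr->update_rp_moderation_threshold = 8;	/* placeholder N */
}
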
diff --git a/include/linux/nfcinfo.h b/include/linux/nfcinfo.h
new file mode 100644
index 0000000..1417c9b
--- /dev/null
+++ b/include/linux/nfcinfo.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _NFCINFO_H
+#define _NFCINFO_H
+
+#include <uapi/linux/nfc/nfcinfo.h>
+
+#endif
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 81bc77c..42ffd3a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -34,6 +34,9 @@
 /* Indicate backport support for FILS SK offload in cfg80211 */
 #define CFG80211_FILS_SK_OFFLOAD_SUPPORT 1
 
+/* Indicate backport support for external authentication */
+#define CFG80211_EXTERNAL_AUTH_SUPPORT 1
+
 /**
  * DOC: Introduction
  *
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
index 7b706ff..988011f 100644
--- a/include/trace/events/mmc.h
+++ b/include/trace/events/mmc.h
@@ -187,7 +187,152 @@
 		  __entry->hold_retune, __entry->retune_period)
 );
 
-#endif /* _TRACE_MMC_H */
+TRACE_EVENT(mmc_cmd_rw_start,
+	TP_PROTO(unsigned int cmd, unsigned int arg, unsigned int flags),
+	TP_ARGS(cmd, arg, flags),
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned int, arg)
+		__field(unsigned int, flags)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->arg = arg;
+		__entry->flags = flags;
+	),
+	TP_printk("cmd=%u,arg=0x%08x,flags=0x%08x",
+		  __entry->cmd, __entry->arg, __entry->flags)
+);
+
+TRACE_EVENT(mmc_cmd_rw_end,
+	TP_PROTO(unsigned int cmd, unsigned int status, unsigned int resp),
+	TP_ARGS(cmd, status, resp),
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned int, status)
+		__field(unsigned int, resp)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->status = status;
+		__entry->resp = resp;
+	),
+	TP_printk("cmd=%u,int_status=0x%08x,response=0x%08x",
+		  __entry->cmd, __entry->status, __entry->resp)
+);
+
+TRACE_EVENT(mmc_data_rw_end,
+	TP_PROTO(unsigned int cmd, unsigned int status),
+	TP_ARGS(cmd, status),
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned int, status)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->status = status;
+	),
+	TP_printk("cmd=%u,int_status=0x%08x",
+		  __entry->cmd, __entry->status)
+);
+
+DECLARE_EVENT_CLASS(mmc_adma_class,
+	TP_PROTO(unsigned int cmd, unsigned int len),
+	TP_ARGS(cmd, len),
+	TP_STRUCT__entry(
+		__field(unsigned int, cmd)
+		__field(unsigned int, len)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->len = len;
+	),
+	TP_printk("cmd=%u,sg_len=0x%08x", __entry->cmd, __entry->len)
+);
+
+DEFINE_EVENT(mmc_adma_class, mmc_adma_table_pre,
+	TP_PROTO(unsigned int cmd, unsigned int len),
+	TP_ARGS(cmd, len));
+
+DEFINE_EVENT(mmc_adma_class, mmc_adma_table_post,
+	TP_PROTO(unsigned int cmd, unsigned int len),
+	TP_ARGS(cmd, len));
+
+TRACE_EVENT(mmc_clk,
+	TP_PROTO(char *print_info),
+
+	TP_ARGS(print_info),
+
+	TP_STRUCT__entry(
+		__string(print_info, print_info)
+	),
+
+	TP_fast_assign(
+		__assign_str(print_info, print_info);
+	),
+
+	TP_printk("%s",
+		__get_str(print_info)
+	)
+);
+
+DECLARE_EVENT_CLASS(mmc_pm_template,
+	TP_PROTO(const char *dev_name, int err, s64 usecs),
+
+	TP_ARGS(dev_name, err, usecs),
+
+	TP_STRUCT__entry(
+		__field(s64, usecs)
+		__field(int, err)
+		__string(dev_name, dev_name)
+	),
+
+	TP_fast_assign(
+		__entry->usecs = usecs;
+		__entry->err = err;
+		__assign_str(dev_name, dev_name);
+	),
+
+	TP_printk(
+		"took %lld usecs, %s err %d",
+		__entry->usecs,
+		__get_str(dev_name),
+		__entry->err
+	)
+);
+
+DEFINE_EVENT(mmc_pm_template, mmc_runtime_suspend,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, mmc_runtime_resume,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, mmc_suspend,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, mmc_resume,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, sdhci_msm_suspend,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, sdhci_msm_resume,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, sdhci_msm_runtime_suspend,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+
+DEFINE_EVENT(mmc_pm_template, sdhci_msm_runtime_resume,
+	     TP_PROTO(const char *dev_name, int err, s64 usecs),
+	     TP_ARGS(dev_name, err, usecs));
+#endif /* if !defined(_TRACE_MMC_H) || defined(TRACE_HEADER_MULTI_READ) */
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>
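
A hedged usage sketch for the new command tracepoints: a host driver would emit them around request submission and completion. The surrounding function and the interrupt-status source are hypothetical:

/* Illustrative only: bracketing a command with the rw_start/rw_end events. */
static void example_issue_cmd(struct mmc_command *cmd, u32 intmask)
{
	trace_mmc_cmd_rw_start(cmd->opcode, cmd->arg, cmd->flags);
	/* ... program the controller and wait for the IRQ ... */
	trace_mmc_cmd_rw_end(cmd->opcode, intmask, cmd->resp[0]);
}
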
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 50ab515..5ad6e13 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1180,10 +1180,11 @@
 
 	TP_PROTO(struct task_struct *p, int best_energy_cpu,
 		bool sync, bool need_idle, int fastpath,
-		bool placement_boost, int rtg_cpu, u64 start_t),
+		bool placement_boost, int rtg_cpu, u64 start_t,
+		bool stune_boosted),
 
 	TP_ARGS(p, best_energy_cpu, sync, need_idle, fastpath,
-		placement_boost, rtg_cpu, start_t),
+		placement_boost, rtg_cpu, start_t, stune_boosted),
 
 	TP_STRUCT__entry(
 		__field(int,		pid)
@@ -1197,6 +1198,7 @@
 		__field(int,		placement_boost)
 		__field(int,		rtg_cpu)
 		__field(u64,		latency)
+		__field(bool,		stune_boosted)
 	),
 
 	TP_fast_assign(
@@ -1211,13 +1213,14 @@
 		__entry->placement_boost        = placement_boost;
 		__entry->rtg_cpu                = rtg_cpu;
 		__entry->latency                = (sched_clock() - start_t);
+		__entry->stune_boosted          = stune_boosted;
 	),
 
-	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d rtg_cpu=%d latency=%llu",
+	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d rtg_cpu=%d latency=%llu stune_boosted=%d",
 		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu,
 		__entry->best_energy_cpu, __entry->sync, __entry->need_idle,
 		__entry->fastpath, __entry->placement_boost, __entry->rtg_cpu,
-		__entry->latency)
+		__entry->latency, __entry->stune_boosted)
 )
 
 /*
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index e81b23a..8045240 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -12,6 +12,8 @@
 no-export-headers += kvm_para.h
 endif
 
+header-y += nfc/
+
 ifneq ($(VSERVICES_SUPPORT), "")
 include include/linux/Kbuild.vservices
 endif
diff --git a/include/uapi/linux/mmc/Kbuild b/include/uapi/linux/mmc/Kbuild
new file mode 100644
index 0000000..8d910dd
--- /dev/null
+++ b/include/uapi/linux/mmc/Kbuild
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
+# UAPI Header export list
+header-y += core.h
+header-y += ioctl.h
+header-y += mmc.h
diff --git a/include/uapi/linux/mmc/core.h b/include/uapi/linux/mmc/core.h
new file mode 100644
index 0000000..72fd7b7
--- /dev/null
+++ b/include/uapi/linux/mmc/core.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+#ifndef UAPI_MMC_CORE_H
+#define UAPI_MMC_CORE_H
+
+#define MMC_RSP_PRESENT	(1 << 0)
+#define MMC_RSP_136	(1 << 1)		/* 136 bit response */
+#define MMC_RSP_CRC	(1 << 2)		/* expect valid crc */
+#define MMC_RSP_BUSY	(1 << 3)		/* card may send busy */
+#define MMC_RSP_OPCODE	(1 << 4)		/* response contains opcode */
+
+#define MMC_CMD_MASK	(3 << 5)		/* non-SPI command type */
+#define MMC_CMD_AC	(0 << 5)
+#define MMC_CMD_ADTC	(1 << 5)
+#define MMC_CMD_BC	(2 << 5)
+#define MMC_CMD_BCR	(3 << 5)
+
+#define MMC_RSP_SPI_S1	(1 << 7)		/* one status byte */
+#define MMC_RSP_SPI_S2	(1 << 8)		/* second byte */
+#define MMC_RSP_SPI_B4	(1 << 9)		/* four data bytes */
+#define MMC_RSP_SPI_BUSY (1 << 10)		/* card may send busy */
+
+/*
+ * These are the native response types, and correspond to valid bit
+ * patterns of the above flags.  One additional valid pattern
+ * is all zeros, which means we don't expect a response.
+ */
+#define MMC_RSP_NONE	(0)
+#define MMC_RSP_R1	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+#define MMC_RSP_R1B	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|\
+			MMC_RSP_BUSY)
+#define MMC_RSP_R2	(MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
+#define MMC_RSP_R3	(MMC_RSP_PRESENT)
+#define MMC_RSP_R4	(MMC_RSP_PRESENT)
+#define MMC_RSP_R5	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+#define MMC_RSP_R6	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+#define MMC_RSP_R7	(MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+
+#endif /* UAPI_MMC_CORE_H */
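
The flag encodings above are what host drivers use to decode a command before programming response and transfer registers. A minimal sketch, with hypothetical helper names:

/* Sketch: derive command properties from the flags word. */
static inline bool cmd_expects_long_response(u32 flags)
{
	return (flags & MMC_RSP_PRESENT) && (flags & MMC_RSP_136);
}

static inline bool cmd_is_data_transfer(u32 flags)
{
	return (flags & MMC_CMD_MASK) == MMC_CMD_ADTC;
}
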
diff --git a/include/uapi/linux/mmc/mmc.h b/include/uapi/linux/mmc/mmc.h
new file mode 100644
index 0000000..68fa9da
--- /dev/null
+++ b/include/uapi/linux/mmc/mmc.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+#ifndef UAPI_MMC_MMC_H
+#define UAPI_MMC_MMC_H
+
+/* Standard MMC commands (4.1)           type  argument     response */
+   /* class 1 */
+#define MMC_GO_IDLE_STATE         0   /* bc                          */
+#define MMC_SEND_OP_COND          1   /* bcr  [31:0] OCR         R3  */
+#define MMC_ALL_SEND_CID          2   /* bcr                     R2  */
+#define MMC_SET_RELATIVE_ADDR     3   /* ac   [31:16] RCA        R1  */
+#define MMC_SET_DSR               4   /* bc   [31:16] RCA            */
+#define MMC_SLEEP_AWAKE		  5   /* ac   [31:16] RCA 15:flg R1b */
+#define MMC_SWITCH                6   /* ac   [31:0] See below   R1b */
+#define MMC_SELECT_CARD           7   /* ac   [31:16] RCA        R1  */
+#define MMC_SEND_EXT_CSD          8   /* adtc                    R1  */
+#define MMC_SEND_CSD              9   /* ac   [31:16] RCA        R2  */
+#define MMC_SEND_CID             10   /* ac   [31:16] RCA        R2  */
+#define MMC_READ_DAT_UNTIL_STOP  11   /* adtc [31:0] dadr        R1  */
+#define MMC_STOP_TRANSMISSION    12   /* ac                      R1b */
+#define MMC_SEND_STATUS          13   /* ac   [31:16] RCA        R1  */
+#define MMC_BUS_TEST_R           14   /* adtc                    R1  */
+#define MMC_GO_INACTIVE_STATE    15   /* ac   [31:16] RCA            */
+#define MMC_BUS_TEST_W           19   /* adtc                    R1  */
+#define MMC_SPI_READ_OCR         58   /* spi                  spi_R3 */
+#define MMC_SPI_CRC_ON_OFF       59   /* spi  [0:0] flag      spi_R1 */
+
+  /* class 2 */
+#define MMC_SET_BLOCKLEN         16   /* ac   [31:0] block len   R1  */
+#define MMC_READ_SINGLE_BLOCK    17   /* adtc [31:0] data addr   R1  */
+#define MMC_READ_MULTIPLE_BLOCK  18   /* adtc [31:0] data addr   R1  */
+#define MMC_SEND_TUNING_BLOCK    19   /* adtc                    R1  */
+#define MMC_SEND_TUNING_BLOCK_HS200	21	/* adtc R1  */
+#define MMC_SEND_TUNING_BLOCK_HS400      MMC_SEND_TUNING_BLOCK_HS200
+
+  /* class 3 */
+#define MMC_WRITE_DAT_UNTIL_STOP 20   /* adtc [31:0] data addr   R1  */
+
+  /* class 4 */
+#define MMC_SET_BLOCK_COUNT      23   /* adtc [31:0] data addr   R1  */
+#define MMC_WRITE_BLOCK          24   /* adtc [31:0] data addr   R1  */
+#define MMC_WRITE_MULTIPLE_BLOCK 25   /* adtc                    R1  */
+#define MMC_PROGRAM_CID          26   /* adtc                    R1  */
+#define MMC_PROGRAM_CSD          27   /* adtc                    R1  */
+
+  /* class 6 */
+#define MMC_SET_WRITE_PROT       28   /* ac   [31:0] data addr   R1b */
+#define MMC_CLR_WRITE_PROT       29   /* ac   [31:0] data addr   R1b */
+#define MMC_SEND_WRITE_PROT      30   /* adtc [31:0] wpdata addr R1  */
+
+  /* class 5 */
+#define MMC_ERASE_GROUP_START    35   /* ac   [31:0] data addr   R1  */
+#define MMC_ERASE_GROUP_END      36   /* ac   [31:0] data addr   R1  */
+#define MMC_ERASE                38   /* ac                      R1b */
+
+  /* class 9 */
+#define MMC_FAST_IO              39   /* ac   <Complex>          R4  */
+#define MMC_GO_IRQ_STATE         40   /* bcr                     R5  */
+
+  /* class 7 */
+#define MMC_LOCK_UNLOCK          42   /* adtc                    R1b */
+
+  /* class 8 */
+#define MMC_APP_CMD              55   /* ac   [31:16] RCA        R1  */
+#define MMC_GEN_CMD              56   /* adtc [0] RD/WR          R1  */
+
+#endif /* UAPI_MMC_MMC_H */
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index a48e4b6..9f26c28 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -349,6 +349,7 @@
 
 #define IPA_CLIENT_MAX (IPA_CLIENT_APPS_WAN_COAL_CONS + 1)
 
+#define IPA_CLIENT_WLAN2_PROD IPA_CLIENT_A5_WLAN_AMPDU_PROD
 #define IPA_CLIENT_Q6_DL_NLO_DATA_PROD IPA_CLIENT_Q6_DL_NLO_DATA_PROD
 #define IPA_CLIENT_Q6_UL_NLO_ACK_CONS IPA_CLIENT_Q6_UL_NLO_ACK_CONS
 #define IPA_CLIENT_Q6_QBAP_STATUS_CONS IPA_CLIENT_Q6_QBAP_STATUS_CONS
diff --git a/include/uapi/linux/nfc/Kbuild b/include/uapi/linux/nfc/Kbuild
new file mode 100644
index 0000000..d3fbe3c
--- /dev/null
+++ b/include/uapi/linux/nfc/Kbuild
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
+# UAPI header export list
+header-y += nfcinfo.h
diff --git a/include/uapi/linux/nfc/nfcinfo.h b/include/uapi/linux/nfc/nfcinfo.h
new file mode 100644
index 0000000..4ac6302
--- /dev/null
+++ b/include/uapi/linux/nfc/nfcinfo.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2018,2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_NFCINFO_H_
+#define _UAPI_NFCINFO_H_
+
+#include <linux/ioctl.h>
+
+#define NFCC_MAGIC 0xE9
+#define NFCC_GET_INFO _IOW(NFCC_MAGIC, 0x09, unsigned int)
+
+struct nqx_devinfo {
+	unsigned char chip_type;
+	unsigned char rom_version;
+	unsigned char fw_major;
+	unsigned char fw_minor;
+};
+
+union nqx_uinfo {
+	unsigned int i;
+	struct nqx_devinfo info;
+};
+
+#endif
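
A hypothetical user-space sketch of NFCC_GET_INFO; the device node path and the convention that the ioctl return value carries the packed version word are assumptions about the (unshown) driver:

/* Illustrative only: query NFC controller version info. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nfc/nfcinfo.h>

int main(void)
{
	union nqx_uinfo ver;
	int fd = open("/dev/nq-nci", O_RDONLY);	/* assumed node */

	if (fd < 0)
		return 1;
	ver.i = ioctl(fd, NFCC_GET_INFO, 0);
	printf("chip=%u rom=%u fw=%u.%u\n", ver.info.chip_type,
	       ver.info.rom_version, ver.info.fw_major, ver.info.fw_minor);
	close(fd);
	return 0;
}
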
diff --git a/include/uapi/linux/spcom.h b/include/uapi/linux/spcom.h
index 290585c..8fde38e5 100644
--- a/include/uapi/linux/spcom.h
+++ b/include/uapi/linux/spcom.h
@@ -50,6 +50,9 @@
 	SPCOM_CMD_UNLOCK_ION_BUF = 0x554C434B, /* "ULCK" = 0x554C434B */
 	SPCOM_CMD_FSSR		= 0x46535352, /* "FSSR" = 0x46535352 */
 	SPCOM_CMD_CREATE_CHANNEL = 0x43524554, /* "CRET" = 0x43524554 */
+#define SPCOM_CMD_RESTART_SP \
+	SPCOM_CMD_RESTART_SP
+	SPCOM_CMD_RESTART_SP    = 0x52535452, /* "RSTR" = 0x52535452 */
 };
 
 /*
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index 35ba383..e3a089d 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -14,3 +14,4 @@
 header-y += cam_lrme.h
 header-y += radio-iris.h
 header-y += radio-iris-commands.h
+header-y += synx.h
diff --git a/include/uapi/media/synx.h b/include/uapi/media/synx.h
new file mode 100644
index 0000000..4ab7b3e
--- /dev/null
+++ b/include/uapi/media/synx.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_SYNX_H__
+#define __UAPI_SYNX_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Size of opaque payload sent to kernel for safekeeping until signal time */
+#define SYNX_USER_PAYLOAD_SIZE               4
+
+#define SYNX_STATE_INVALID                   0
+#define SYNX_STATE_ACTIVE                    1
+#define SYNX_STATE_SIGNALED_SUCCESS          2
+#define SYNX_STATE_SIGNALED_ERROR            3
+
+#define SYNX_MAX_WAITING_SYNX                16
+
+#define SYNX_CALLBACK_RESULT_SUCCESS         2
+#define SYNX_CALLBACK_RESULT_FAILED          3
+#define SYNX_CALLBACK_RESULT_CANCELED        4
+
+/**
+ * type of external sync object
+ *
+ * SYNX_TYPE_CSL  : Object is a CSL sync object
+ */
+#define SYNX_TYPE_CSL       0
+#define SYNX_MAX_BIND_TYPES 1
+/**
+ * struct synx_info - Sync object creation information
+ *
+ * @name     : Optional string representation of the synx object
+ * @synx_obj : Sync object returned after creation in kernel
+ */
+struct synx_info {
+	char name[64];
+	__s32 synx_obj;
+};
+
+/**
+ * struct synx_userpayload_info - Payload info from user space
+ *
+ * @synx_obj:   Sync object for which the payload has to be registered
+ * @reserved:   Reserved
+ * @payload:    Pointer to user payload
+ */
+struct synx_userpayload_info {
+	__s32 synx_obj;
+	__u32 reserved;
+	__u64 payload[SYNX_USER_PAYLOAD_SIZE];
+};
+
+/**
+ * struct synx_signal - Sync object signaling struct
+ *
+ * @synx_obj   : Sync object to be signaled
+ * @synx_state : State of the synx object to which it should be signaled
+ */
+struct synx_signal {
+	__s32 synx_obj;
+	__u32 synx_state;
+};
+
+/**
+ * struct synx_merge - Merge information for synx objects
+ *
+ * @synx_objs :  Pointer to synx object array to merge
+ * @num_objs  :  Number of objects in the array
+ * @merged    :  Merged synx object
+ */
+struct synx_merge {
+	__u64 synx_objs;
+	__u32 num_objs;
+	__s32 merged;
+};
+
+/**
+ * struct synx_wait - Sync object wait information
+ *
+ * @synx_obj   : Sync object to wait on
+ * @reserved   : Reserved
+ * @timeout_ms : Timeout in milliseconds
+ */
+struct synx_wait {
+	__s32 synx_obj;
+	__u32 reserved;
+	__u64 timeout_ms;
+};
+
+/**
+ * struct synx_external_desc - info of external sync object
+ *
+ * @type     : Synx type
+ * @reserved : Reserved
+ * @id       : Sync object id
+ *
+ */
+struct synx_external_desc {
+	__u32 type;
+	__u32 reserved;
+	__s32 id[2];
+};
+
+/**
+ * struct synx_bind - info for binding two synx objects
+ *
+ * @synx_obj      : Synx object
+ * @reserved      : Reserved
+ * @ext_sync_desc : External synx to bind to
+ *
+ */
+struct synx_bind {
+	__s32 synx_obj;
+	__u32 reserved;
+	struct synx_external_desc ext_sync_desc;
+};
+
+/**
+ * struct synx_addrefcount - info for refcount increment
+ *
+ * @synx_obj : Synx object
+ * @count    : Count to increment
+ *
+ */
+struct synx_addrefcount {
+	__s32 synx_obj;
+	__u32 count;
+};
+
+/**
+ * struct synx_id_info - info for import and export of a synx object
+ *
+ * @synx_obj     : Synx object to be exported
+ * @secure_key   : Secure key created in export and used in import
+ * @new_synx_obj : Synx object created in import
+ *
+ */
+struct synx_id_info {
+	__s32 synx_obj;
+	__u32 secure_key;
+	__s32 new_synx_obj;
+	__u32 padding;
+};
+
+/**
+ * struct synx_private_ioctl_arg - Sync driver ioctl argument
+ *
+ * @id        : IOCTL command id
+ * @size      : Size of command payload
+ * @result    : Result of command execution
+ * @reserved  : Reserved
+ * @ioctl_ptr : Pointer to user data
+ */
+struct synx_private_ioctl_arg {
+	__u32 id;
+	__u32 size;
+	__u32 result;
+	__u32 reserved;
+	__u64 ioctl_ptr;
+};
+
+#define SYNX_PRIVATE_MAGIC_NUM 's'
+
+#define SYNX_PRIVATE_IOCTL_CMD \
+	_IOWR(SYNX_PRIVATE_MAGIC_NUM, 130, struct synx_private_ioctl_arg)
+
+#define SYNX_CREATE                          0
+#define SYNX_RELEASE                         1
+#define SYNX_SIGNAL                          2
+#define SYNX_MERGE                           3
+#define SYNX_REGISTER_PAYLOAD                4
+#define SYNX_DEREGISTER_PAYLOAD              5
+#define SYNX_WAIT                            6
+#define SYNX_BIND                            7
+#define SYNX_ADDREFCOUNT                     8
+#define SYNX_GETSTATUS                       9
+#define SYNX_IMPORT                          10
+#define SYNX_EXPORT                          11
+
+#endif /* __UAPI_SYNX_H__ */
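
A hypothetical user-space sketch of driving the private ioctl defined above to create one synx object; the file descriptor is assumed to come from opening the (unshown) synx device node:

/* Illustrative only: create a synx object and return its handle. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <media/synx.h>

static int example_synx_create(int fd)
{
	struct synx_info info;
	struct synx_private_ioctl_arg arg;

	memset(&info, 0, sizeof(info));
	strncpy(info.name, "example", sizeof(info.name) - 1);

	memset(&arg, 0, sizeof(arg));
	arg.id = SYNX_CREATE;
	arg.size = sizeof(info);
	arg.ioctl_ptr = (__u64)(uintptr_t)&info;

	if (ioctl(fd, SYNX_PRIVATE_IOCTL_CMD, &arg) < 0)
		return -1;
	return info.synx_obj;	/* handle filled in by the kernel */
}
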
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e80289..de8134bf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3863,11 +3863,11 @@
 static bool is_packing_eligible(struct task_struct *p, int target_cpu,
 				struct find_best_target_env *fbt_env,
 				unsigned int target_cpus_count,
-				int best_idle_cstate)
+				int best_idle_cstate, bool boosted)
 {
 	unsigned long tutil, estimated_capacity;
 
-	if (task_placement_boost_enabled(p) || fbt_env->need_idle)
+	if (task_placement_boost_enabled(p) || fbt_env->need_idle || boosted)
 		return false;
 
 	if (best_idle_cstate == -1)
@@ -6652,8 +6652,12 @@
 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
 	int start_cpu = -1;
 
-	if (boosted)
+	if (boosted) {
+		if (rd->mid_cap_orig_cpu != -1 &&
+		    task_fits_max(p, rd->mid_cap_orig_cpu))
+			return rd->mid_cap_orig_cpu;
 		return rd->max_cap_orig_cpu;
+	}
 
 	/* A task always fits on its rtg_target */
 	if (rtg_target) {
@@ -7026,10 +7030,10 @@
 		 * next cluster if they are higher in capacity. If we are
 		 * not in any kind of boost, we break.
 		 */
-		if (!prefer_idle &&
+		if (!prefer_idle && !boosted &&
 			(target_cpu != -1 || best_idle_cpu != -1) &&
 			(fbt_env->placement_boost == SCHED_BOOST_NONE ||
-			sched_boost() != FULL_THROTTLE_BOOST ||
+			!is_full_throttle_boost() ||
 			(fbt_env->placement_boost == SCHED_BOOST_ON_BIG &&
 				!next_group_higher_cap)))
 			break;
@@ -7037,9 +7041,12 @@
 		/*
 		 * if we are in prefer_idle and have found an idle cpu,
 		 * break from searching more groups based on the stune.boost and
-		 * group cpu capacity.
+		 * group cpu capacity. For !prefer_idle && boosted case, don't
+		 * iterate lower capacity CPUs unless the task can't be
+		 * accommodated in the higher capacity CPUs.
 		 */
-		if (prefer_idle && best_idle_cpu != -1) {
+		if ((prefer_idle && best_idle_cpu != -1) ||
+		    (boosted && (best_idle_cpu != -1 || target_cpu != -1))) {
 			if (boosted) {
 				if (!next_group_higher_cap)
 					break;
@@ -7052,7 +7059,7 @@
 	} while (sg = sg->next, sg != start_sd->groups);
 
 	if (best_idle_cpu != -1 && !is_packing_eligible(p, target_cpu, fbt_env,
-				active_cpus_count, shallowest_idle_cstate)) {
+			active_cpus_count, shallowest_idle_cstate, boosted)) {
 		target_cpu = best_idle_cpu;
 		best_idle_cpu = -1;
 	}
@@ -7368,6 +7375,7 @@
 	int placement_boost = task_boost_policy(p);
 	u64 start_t = 0;
 	int delta = 0;
+	int boosted = (schedtune_task_boost(p) > 0);
 
 	fbt_env.fastpath = 0;
 
@@ -7435,7 +7443,7 @@
 						 p->state == TASK_WAKING)
 		delta = task_util(p);
 #endif
-	if (task_placement_boost_enabled(p) || need_idle ||
+	if (task_placement_boost_enabled(p) || need_idle || boosted ||
 	    (rtg_target && (!cpumask_test_cpu(prev_cpu, rtg_target) ||
 	    cpumask_test_cpu(cpu, rtg_target))) ||
 	    __cpu_overutilized(prev_cpu, delta) ||
@@ -7465,7 +7473,8 @@
 sync_wakeup:
 	trace_sched_task_util(p, best_energy_cpu, sync,
 			need_idle, fbt_env.fastpath, placement_boost,
-			rtg_target ? cpumask_first(rtg_target) : -1, start_t);
+			rtg_target ? cpumask_first(rtg_target) : -1, start_t,
+			boosted);
 
 	/*
 	 * Pick the best CPU if prev_cpu cannot be used, or if it saves at
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ff96260..59bd851 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1767,9 +1767,7 @@
 	unsigned long tutil = task_util(task);
 	int best_cpu_idle_idx = INT_MAX;
 	int cpu_idle_idx = -1;
-	bool boost_on_big = sched_boost() == FULL_THROTTLE_BOOST ?
-				  (sched_boost_policy() == SCHED_BOOST_ON_BIG) :
-				  false;
+	bool boost_on_big = rt_boost_on_big();
 
 	rcu_read_lock();
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 854afe6..3bcecf5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2996,6 +2996,17 @@
 	return sched_boost_type;
 }
 
+static inline bool rt_boost_on_big(void)
+{
+	return sched_boost() == FULL_THROTTLE_BOOST ?
+			(sched_boost_policy() == SCHED_BOOST_ON_BIG) : false;
+}
+
+static inline bool is_full_throttle_boost(void)
+{
+	return sched_boost() == FULL_THROTTLE_BOOST;
+}
+
 extern int preferred_cluster(struct sched_cluster *cluster,
 						struct task_struct *p);
 extern struct sched_cluster *rq_cluster(struct rq *rq);
@@ -3142,6 +3153,16 @@
 	return 0;
 }
 
+static inline bool rt_boost_on_big(void)
+{
+	return false;
+}
+
+static inline bool is_full_throttle_boost(void)
+{
+	return false;
+}
+
 static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
 {
 	return SCHED_BOOST_NONE;
diff --git a/mm/cma.c b/mm/cma.c
index ee79240..6e5d76c 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -37,6 +37,7 @@
 #include <linux/io.h>
 #include <linux/kmemleak.h>
 #include <linux/delay.h>
+#include <linux/show_mem_notifier.h>
 #include <trace/events/cma.h>
 
 #include "cma.h"
@@ -98,6 +99,29 @@
 	mutex_unlock(&cma->lock);
 }
 
+static int cma_showmem_notifier(struct notifier_block *nb,
+				   unsigned long action, void *data)
+{
+	int i;
+	unsigned long used;
+	struct cma *cma;
+
+	for (i = 0; i < cma_area_count; i++) {
+		cma = &cma_areas[i];
+		used = bitmap_weight(cma->bitmap,
+				     (int)cma_bitmap_maxno(cma));
+		used <<= cma->order_per_bit;
+		pr_info("cma-%d pages: => %lu used of %lu total pages\n",
+			i, used, cma->count);
+	}
+
+	return 0;
+}
+
+static struct notifier_block cma_nb = {
+	.notifier_call = cma_showmem_notifier,
+};
+
 static int __init cma_activate_area(struct cma *cma)
 {
 	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
@@ -162,6 +186,8 @@
 			return ret;
 	}
 
+	show_mem_notifier_register(&cma_nb);
+
 	return 0;
 }
 core_initcall(cma_init_reserved_areas);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index fef96a0..b565d2c 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -42,6 +42,7 @@
 #include <linux/init.h>
 #include <linux/mmu_notifier.h>
 #include <linux/memory_hotplug.h>
+#include <linux/show_mem_notifier.h>
 
 #include <asm/tlb.h>
 #include "internal.h"
@@ -447,7 +448,10 @@
 		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
 		if (is_dump_unreclaim_slabs())
 			dump_unreclaimable_slab();
+
+		show_mem_call_notifiers();
 	}
+
 	if (sysctl_oom_dump_tasks)
 		dump_tasks(oc->memcg, oc->nodemask);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5c9a968..2159e74 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -63,6 +63,7 @@
 #include <linux/page_owner.h>
 #include <linux/kthread.h>
 #include <linux/memcontrol.h>
+#include <linux/show_mem_notifier.h>
 #include <linux/ftrace.h>
 #include <linux/lockdep.h>
 #include <linux/nmi.h>
@@ -3413,6 +3414,7 @@
 		filter &= ~SHOW_MEM_FILTER_NODES;
 
 	show_mem(filter, nodemask);
+	show_mem_call_notifiers();
 }
 
 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)